• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "assembler_mips64.h"
18 
19 #include "base/bit_utils.h"
20 #include "base/casts.h"
21 #include "entrypoints/quick/quick_entrypoints.h"
22 #include "entrypoints/quick/quick_entrypoints_enum.h"
23 #include "memory_region.h"
24 #include "thread.h"
25 
26 namespace art {
27 namespace mips64 {
28 
29 static_assert(static_cast<size_t>(kMips64PointerSize) == kMips64DoublewordSize,
30               "Unexpected Mips64 pointer size.");
31 static_assert(kMips64PointerSize == PointerSize::k64, "Unexpected Mips64 pointer size.");
32 
33 
FinalizeCode()34 void Mips64Assembler::FinalizeCode() {
35   for (auto& exception_block : exception_blocks_) {
36     EmitExceptionPoll(&exception_block);
37   }
38   ReserveJumpTableSpace();
39   EmitLiterals();
40   PromoteBranches();
41 }
42 
FinalizeInstructions(const MemoryRegion & region)43 void Mips64Assembler::FinalizeInstructions(const MemoryRegion& region) {
44   EmitBranches();
45   EmitJumpTables();
46   Assembler::FinalizeInstructions(region);
47   PatchCFI();
48 }
49 
PatchCFI()50 void Mips64Assembler::PatchCFI() {
51   if (cfi().NumberOfDelayedAdvancePCs() == 0u) {
52     return;
53   }
54 
55   typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
56   const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
57   const std::vector<uint8_t>& old_stream = data.first;
58   const std::vector<DelayedAdvancePC>& advances = data.second;
59 
60   // Refill our data buffer with patched opcodes.
61   cfi().ReserveCFIStream(old_stream.size() + advances.size() + 16);
62   size_t stream_pos = 0;
63   for (const DelayedAdvancePC& advance : advances) {
64     DCHECK_GE(advance.stream_pos, stream_pos);
65     // Copy old data up to the point where advance was issued.
66     cfi().AppendRawData(old_stream, stream_pos, advance.stream_pos);
67     stream_pos = advance.stream_pos;
68     // Insert the advance command with its final offset.
69     size_t final_pc = GetAdjustedPosition(advance.pc);
70     cfi().AdvancePC(final_pc);
71   }
72   // Copy the final segment if any.
73   cfi().AppendRawData(old_stream, stream_pos, old_stream.size());
74 }
75 
EmitBranches()76 void Mips64Assembler::EmitBranches() {
77   CHECK(!overwriting_);
78   // Switch from appending instructions at the end of the buffer to overwriting
79   // existing instructions (branch placeholders) in the buffer.
80   overwriting_ = true;
81   for (auto& branch : branches_) {
82     EmitBranch(&branch);
83   }
84   overwriting_ = false;
85 }
86 
Emit(uint32_t value)87 void Mips64Assembler::Emit(uint32_t value) {
88   if (overwriting_) {
89     // Branches to labels are emitted into their placeholders here.
90     buffer_.Store<uint32_t>(overwrite_location_, value);
91     overwrite_location_ += sizeof(uint32_t);
92   } else {
93     // Other instructions are simply appended at the end here.
94     AssemblerBuffer::EnsureCapacity ensured(&buffer_);
95     buffer_.Emit<uint32_t>(value);
96   }
97 }
98 
EmitR(int opcode,GpuRegister rs,GpuRegister rt,GpuRegister rd,int shamt,int funct)99 void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
100                             int shamt, int funct) {
101   CHECK_NE(rs, kNoGpuRegister);
102   CHECK_NE(rt, kNoGpuRegister);
103   CHECK_NE(rd, kNoGpuRegister);
104   uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
105                       static_cast<uint32_t>(rs) << kRsShift |
106                       static_cast<uint32_t>(rt) << kRtShift |
107                       static_cast<uint32_t>(rd) << kRdShift |
108                       shamt << kShamtShift |
109                       funct;
110   Emit(encoding);
111 }
112 
EmitRsd(int opcode,GpuRegister rs,GpuRegister rd,int shamt,int funct)113 void Mips64Assembler::EmitRsd(int opcode, GpuRegister rs, GpuRegister rd,
114                               int shamt, int funct) {
115   CHECK_NE(rs, kNoGpuRegister);
116   CHECK_NE(rd, kNoGpuRegister);
117   uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
118                       static_cast<uint32_t>(rs) << kRsShift |
119                       static_cast<uint32_t>(ZERO) << kRtShift |
120                       static_cast<uint32_t>(rd) << kRdShift |
121                       shamt << kShamtShift |
122                       funct;
123   Emit(encoding);
124 }
125 
EmitRtd(int opcode,GpuRegister rt,GpuRegister rd,int shamt,int funct)126 void Mips64Assembler::EmitRtd(int opcode, GpuRegister rt, GpuRegister rd,
127                               int shamt, int funct) {
128   CHECK_NE(rt, kNoGpuRegister);
129   CHECK_NE(rd, kNoGpuRegister);
130   uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
131                       static_cast<uint32_t>(ZERO) << kRsShift |
132                       static_cast<uint32_t>(rt) << kRtShift |
133                       static_cast<uint32_t>(rd) << kRdShift |
134                       shamt << kShamtShift |
135                       funct;
136   Emit(encoding);
137 }
138 
EmitI(int opcode,GpuRegister rs,GpuRegister rt,uint16_t imm)139 void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
140   CHECK_NE(rs, kNoGpuRegister);
141   CHECK_NE(rt, kNoGpuRegister);
142   uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
143                       static_cast<uint32_t>(rs) << kRsShift |
144                       static_cast<uint32_t>(rt) << kRtShift |
145                       imm;
146   Emit(encoding);
147 }
148 
EmitI21(int opcode,GpuRegister rs,uint32_t imm21)149 void Mips64Assembler::EmitI21(int opcode, GpuRegister rs, uint32_t imm21) {
150   CHECK_NE(rs, kNoGpuRegister);
151   CHECK(IsUint<21>(imm21)) << imm21;
152   uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
153                       static_cast<uint32_t>(rs) << kRsShift |
154                       imm21;
155   Emit(encoding);
156 }
157 
EmitI26(int opcode,uint32_t imm26)158 void Mips64Assembler::EmitI26(int opcode, uint32_t imm26) {
159   CHECK(IsUint<26>(imm26)) << imm26;
160   uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift | imm26;
161   Emit(encoding);
162 }
163 
EmitFR(int opcode,int fmt,FpuRegister ft,FpuRegister fs,FpuRegister fd,int funct)164 void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
165                              int funct) {
166   CHECK_NE(ft, kNoFpuRegister);
167   CHECK_NE(fs, kNoFpuRegister);
168   CHECK_NE(fd, kNoFpuRegister);
169   uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
170                       fmt << kFmtShift |
171                       static_cast<uint32_t>(ft) << kFtShift |
172                       static_cast<uint32_t>(fs) << kFsShift |
173                       static_cast<uint32_t>(fd) << kFdShift |
174                       funct;
175   Emit(encoding);
176 }
177 
EmitFI(int opcode,int fmt,FpuRegister ft,uint16_t imm)178 void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister ft, uint16_t imm) {
179   CHECK_NE(ft, kNoFpuRegister);
180   uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
181                       fmt << kFmtShift |
182                       static_cast<uint32_t>(ft) << kFtShift |
183                       imm;
184   Emit(encoding);
185 }
186 
EmitMsa3R(int operation,int df,VectorRegister wt,VectorRegister ws,VectorRegister wd,int minor_opcode)187 void Mips64Assembler::EmitMsa3R(int operation,
188                                 int df,
189                                 VectorRegister wt,
190                                 VectorRegister ws,
191                                 VectorRegister wd,
192                                 int minor_opcode) {
193   CHECK_NE(wt, kNoVectorRegister);
194   CHECK_NE(ws, kNoVectorRegister);
195   CHECK_NE(wd, kNoVectorRegister);
196   uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
197                       operation << kMsaOperationShift |
198                       df << kDfShift |
199                       static_cast<uint32_t>(wt) << kWtShift |
200                       static_cast<uint32_t>(ws) << kWsShift |
201                       static_cast<uint32_t>(wd) << kWdShift |
202                       minor_opcode;
203   Emit(encoding);
204 }
205 
EmitMsaBIT(int operation,int df_m,VectorRegister ws,VectorRegister wd,int minor_opcode)206 void Mips64Assembler::EmitMsaBIT(int operation,
207                                  int df_m,
208                                  VectorRegister ws,
209                                  VectorRegister wd,
210                                  int minor_opcode) {
211   CHECK_NE(ws, kNoVectorRegister);
212   CHECK_NE(wd, kNoVectorRegister);
213   uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
214                       operation << kMsaOperationShift |
215                       df_m << kDfMShift |
216                       static_cast<uint32_t>(ws) << kWsShift |
217                       static_cast<uint32_t>(wd) << kWdShift |
218                       minor_opcode;
219   Emit(encoding);
220 }
221 
EmitMsaELM(int operation,int df_n,VectorRegister ws,VectorRegister wd,int minor_opcode)222 void Mips64Assembler::EmitMsaELM(int operation,
223                                  int df_n,
224                                  VectorRegister ws,
225                                  VectorRegister wd,
226                                  int minor_opcode) {
227   CHECK_NE(ws, kNoVectorRegister);
228   CHECK_NE(wd, kNoVectorRegister);
229   uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
230                       operation << kMsaELMOperationShift |
231                       df_n << kDfNShift |
232                       static_cast<uint32_t>(ws) << kWsShift |
233                       static_cast<uint32_t>(wd) << kWdShift |
234                       minor_opcode;
235   Emit(encoding);
236 }
237 
EmitMsaMI10(int s10,GpuRegister rs,VectorRegister wd,int minor_opcode,int df)238 void Mips64Assembler::EmitMsaMI10(int s10,
239                                   GpuRegister rs,
240                                   VectorRegister wd,
241                                   int minor_opcode,
242                                   int df) {
243   CHECK_NE(rs, kNoGpuRegister);
244   CHECK_NE(wd, kNoVectorRegister);
245   CHECK(IsUint<10>(s10)) << s10;
246   uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
247                       s10 << kS10Shift |
248                       static_cast<uint32_t>(rs) << kWsShift |
249                       static_cast<uint32_t>(wd) << kWdShift |
250                       minor_opcode << kS10MinorShift |
251                       df;
252   Emit(encoding);
253 }
254 
EmitMsaI10(int operation,int df,int i10,VectorRegister wd,int minor_opcode)255 void Mips64Assembler::EmitMsaI10(int operation,
256                                  int df,
257                                  int i10,
258                                  VectorRegister wd,
259                                  int minor_opcode) {
260   CHECK_NE(wd, kNoVectorRegister);
261   CHECK(IsUint<10>(i10)) << i10;
262   uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
263                       operation << kMsaOperationShift |
264                       df << kDfShift |
265                       i10 << kI10Shift |
266                       static_cast<uint32_t>(wd) << kWdShift |
267                       minor_opcode;
268   Emit(encoding);
269 }
270 
EmitMsa2R(int operation,int df,VectorRegister ws,VectorRegister wd,int minor_opcode)271 void Mips64Assembler::EmitMsa2R(int operation,
272                                 int df,
273                                 VectorRegister ws,
274                                 VectorRegister wd,
275                                 int minor_opcode) {
276   CHECK_NE(ws, kNoVectorRegister);
277   CHECK_NE(wd, kNoVectorRegister);
278   uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
279                       operation << kMsa2ROperationShift |
280                       df << kDf2RShift |
281                       static_cast<uint32_t>(ws) << kWsShift |
282                       static_cast<uint32_t>(wd) << kWdShift |
283                       minor_opcode;
284   Emit(encoding);
285 }
286 
EmitMsa2RF(int operation,int df,VectorRegister ws,VectorRegister wd,int minor_opcode)287 void Mips64Assembler::EmitMsa2RF(int operation,
288                                  int df,
289                                  VectorRegister ws,
290                                  VectorRegister wd,
291                                  int minor_opcode) {
292   CHECK_NE(ws, kNoVectorRegister);
293   CHECK_NE(wd, kNoVectorRegister);
294   uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
295                       operation << kMsa2RFOperationShift |
296                       df << kDf2RShift |
297                       static_cast<uint32_t>(ws) << kWsShift |
298                       static_cast<uint32_t>(wd) << kWdShift |
299                       minor_opcode;
300   Emit(encoding);
301 }
302 
Addu(GpuRegister rd,GpuRegister rs,GpuRegister rt)303 void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
304   EmitR(0, rs, rt, rd, 0, 0x21);
305 }
306 
Addiu(GpuRegister rt,GpuRegister rs,uint16_t imm16)307 void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
308   EmitI(0x9, rs, rt, imm16);
309 }
310 
Daddu(GpuRegister rd,GpuRegister rs,GpuRegister rt)311 void Mips64Assembler::Daddu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
312   EmitR(0, rs, rt, rd, 0, 0x2d);
313 }
314 
Daddiu(GpuRegister rt,GpuRegister rs,uint16_t imm16)315 void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
316   EmitI(0x19, rs, rt, imm16);
317 }
318 
Subu(GpuRegister rd,GpuRegister rs,GpuRegister rt)319 void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
320   EmitR(0, rs, rt, rd, 0, 0x23);
321 }
322 
Dsubu(GpuRegister rd,GpuRegister rs,GpuRegister rt)323 void Mips64Assembler::Dsubu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
324   EmitR(0, rs, rt, rd, 0, 0x2f);
325 }
326 
MulR6(GpuRegister rd,GpuRegister rs,GpuRegister rt)327 void Mips64Assembler::MulR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
328   EmitR(0, rs, rt, rd, 2, 0x18);
329 }
330 
MuhR6(GpuRegister rd,GpuRegister rs,GpuRegister rt)331 void Mips64Assembler::MuhR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
332   EmitR(0, rs, rt, rd, 3, 0x18);
333 }
334 
DivR6(GpuRegister rd,GpuRegister rs,GpuRegister rt)335 void Mips64Assembler::DivR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
336   EmitR(0, rs, rt, rd, 2, 0x1a);
337 }
338 
ModR6(GpuRegister rd,GpuRegister rs,GpuRegister rt)339 void Mips64Assembler::ModR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
340   EmitR(0, rs, rt, rd, 3, 0x1a);
341 }
342 
DivuR6(GpuRegister rd,GpuRegister rs,GpuRegister rt)343 void Mips64Assembler::DivuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
344   EmitR(0, rs, rt, rd, 2, 0x1b);
345 }
346 
ModuR6(GpuRegister rd,GpuRegister rs,GpuRegister rt)347 void Mips64Assembler::ModuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
348   EmitR(0, rs, rt, rd, 3, 0x1b);
349 }
350 
Dmul(GpuRegister rd,GpuRegister rs,GpuRegister rt)351 void Mips64Assembler::Dmul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
352   EmitR(0, rs, rt, rd, 2, 0x1c);
353 }
354 
Dmuh(GpuRegister rd,GpuRegister rs,GpuRegister rt)355 void Mips64Assembler::Dmuh(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
356   EmitR(0, rs, rt, rd, 3, 0x1c);
357 }
358 
Ddiv(GpuRegister rd,GpuRegister rs,GpuRegister rt)359 void Mips64Assembler::Ddiv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
360   EmitR(0, rs, rt, rd, 2, 0x1e);
361 }
362 
Dmod(GpuRegister rd,GpuRegister rs,GpuRegister rt)363 void Mips64Assembler::Dmod(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
364   EmitR(0, rs, rt, rd, 3, 0x1e);
365 }
366 
Ddivu(GpuRegister rd,GpuRegister rs,GpuRegister rt)367 void Mips64Assembler::Ddivu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
368   EmitR(0, rs, rt, rd, 2, 0x1f);
369 }
370 
Dmodu(GpuRegister rd,GpuRegister rs,GpuRegister rt)371 void Mips64Assembler::Dmodu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
372   EmitR(0, rs, rt, rd, 3, 0x1f);
373 }
374 
And(GpuRegister rd,GpuRegister rs,GpuRegister rt)375 void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
376   EmitR(0, rs, rt, rd, 0, 0x24);
377 }
378 
Andi(GpuRegister rt,GpuRegister rs,uint16_t imm16)379 void Mips64Assembler::Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
380   EmitI(0xc, rs, rt, imm16);
381 }
382 
Or(GpuRegister rd,GpuRegister rs,GpuRegister rt)383 void Mips64Assembler::Or(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
384   EmitR(0, rs, rt, rd, 0, 0x25);
385 }
386 
Ori(GpuRegister rt,GpuRegister rs,uint16_t imm16)387 void Mips64Assembler::Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
388   EmitI(0xd, rs, rt, imm16);
389 }
390 
Xor(GpuRegister rd,GpuRegister rs,GpuRegister rt)391 void Mips64Assembler::Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
392   EmitR(0, rs, rt, rd, 0, 0x26);
393 }
394 
Xori(GpuRegister rt,GpuRegister rs,uint16_t imm16)395 void Mips64Assembler::Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
396   EmitI(0xe, rs, rt, imm16);
397 }
398 
Nor(GpuRegister rd,GpuRegister rs,GpuRegister rt)399 void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
400   EmitR(0, rs, rt, rd, 0, 0x27);
401 }
402 
Bitswap(GpuRegister rd,GpuRegister rt)403 void Mips64Assembler::Bitswap(GpuRegister rd, GpuRegister rt) {
404   EmitRtd(0x1f, rt, rd, 0x0, 0x20);
405 }
406 
Dbitswap(GpuRegister rd,GpuRegister rt)407 void Mips64Assembler::Dbitswap(GpuRegister rd, GpuRegister rt) {
408   EmitRtd(0x1f, rt, rd, 0x0, 0x24);
409 }
410 
Seb(GpuRegister rd,GpuRegister rt)411 void Mips64Assembler::Seb(GpuRegister rd, GpuRegister rt) {
412   EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x10, 0x20);
413 }
414 
Seh(GpuRegister rd,GpuRegister rt)415 void Mips64Assembler::Seh(GpuRegister rd, GpuRegister rt) {
416   EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x18, 0x20);
417 }
418 
Dsbh(GpuRegister rd,GpuRegister rt)419 void Mips64Assembler::Dsbh(GpuRegister rd, GpuRegister rt) {
420   EmitRtd(0x1f, rt, rd, 0x2, 0x24);
421 }
422 
Dshd(GpuRegister rd,GpuRegister rt)423 void Mips64Assembler::Dshd(GpuRegister rd, GpuRegister rt) {
424   EmitRtd(0x1f, rt, rd, 0x5, 0x24);
425 }
426 
Dext(GpuRegister rt,GpuRegister rs,int pos,int size)427 void Mips64Assembler::Dext(GpuRegister rt, GpuRegister rs, int pos, int size) {
428   CHECK(IsUint<5>(pos)) << pos;
429   CHECK(IsUint<5>(size - 1)) << size;
430   EmitR(0x1f, rs, rt, static_cast<GpuRegister>(size - 1), pos, 0x3);
431 }
432 
Dinsu(GpuRegister rt,GpuRegister rs,int pos,int size)433 void Mips64Assembler::Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size) {
434   CHECK(IsUint<5>(pos - 32)) << pos;
435   CHECK(IsUint<5>(size - 1)) << size;
436   CHECK(IsUint<5>(pos + size - 33)) << pos << " + " << size;
437   EmitR(0x1f, rs, rt, static_cast<GpuRegister>(pos + size - 33), pos - 32, 0x6);
438 }
439 
Lsa(GpuRegister rd,GpuRegister rs,GpuRegister rt,int saPlusOne)440 void Mips64Assembler::Lsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne) {
441   CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
442   int sa = saPlusOne - 1;
443   EmitR(0x0, rs, rt, rd, sa, 0x05);
444 }
445 
Dlsa(GpuRegister rd,GpuRegister rs,GpuRegister rt,int saPlusOne)446 void Mips64Assembler::Dlsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne) {
447   CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
448   int sa = saPlusOne - 1;
449   EmitR(0x0, rs, rt, rd, sa, 0x15);
450 }
451 
Wsbh(GpuRegister rd,GpuRegister rt)452 void Mips64Assembler::Wsbh(GpuRegister rd, GpuRegister rt) {
453   EmitRtd(0x1f, rt, rd, 2, 0x20);
454 }
455 
Sc(GpuRegister rt,GpuRegister base,int16_t imm9)456 void Mips64Assembler::Sc(GpuRegister rt, GpuRegister base, int16_t imm9) {
457   CHECK(IsInt<9>(imm9));
458   EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x26);
459 }
460 
Scd(GpuRegister rt,GpuRegister base,int16_t imm9)461 void Mips64Assembler::Scd(GpuRegister rt, GpuRegister base, int16_t imm9) {
462   CHECK(IsInt<9>(imm9));
463   EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x27);
464 }
465 
Ll(GpuRegister rt,GpuRegister base,int16_t imm9)466 void Mips64Assembler::Ll(GpuRegister rt, GpuRegister base, int16_t imm9) {
467   CHECK(IsInt<9>(imm9));
468   EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x36);
469 }
470 
Lld(GpuRegister rt,GpuRegister base,int16_t imm9)471 void Mips64Assembler::Lld(GpuRegister rt, GpuRegister base, int16_t imm9) {
472   CHECK(IsInt<9>(imm9));
473   EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x37);
474 }
475 
Sll(GpuRegister rd,GpuRegister rt,int shamt)476 void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rt, int shamt) {
477   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x00);
478 }
479 
Srl(GpuRegister rd,GpuRegister rt,int shamt)480 void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rt, int shamt) {
481   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x02);
482 }
483 
Rotr(GpuRegister rd,GpuRegister rt,int shamt)484 void Mips64Assembler::Rotr(GpuRegister rd, GpuRegister rt, int shamt) {
485   EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x02);
486 }
487 
Sra(GpuRegister rd,GpuRegister rt,int shamt)488 void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rt, int shamt) {
489   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x03);
490 }
491 
Sllv(GpuRegister rd,GpuRegister rt,GpuRegister rs)492 void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
493   EmitR(0, rs, rt, rd, 0, 0x04);
494 }
495 
Rotrv(GpuRegister rd,GpuRegister rt,GpuRegister rs)496 void Mips64Assembler::Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
497   EmitR(0, rs, rt, rd, 1, 0x06);
498 }
499 
Srlv(GpuRegister rd,GpuRegister rt,GpuRegister rs)500 void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
501   EmitR(0, rs, rt, rd, 0, 0x06);
502 }
503 
Srav(GpuRegister rd,GpuRegister rt,GpuRegister rs)504 void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
505   EmitR(0, rs, rt, rd, 0, 0x07);
506 }
507 
Dsll(GpuRegister rd,GpuRegister rt,int shamt)508 void Mips64Assembler::Dsll(GpuRegister rd, GpuRegister rt, int shamt) {
509   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x38);
510 }
511 
Dsrl(GpuRegister rd,GpuRegister rt,int shamt)512 void Mips64Assembler::Dsrl(GpuRegister rd, GpuRegister rt, int shamt) {
513   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3a);
514 }
515 
Drotr(GpuRegister rd,GpuRegister rt,int shamt)516 void Mips64Assembler::Drotr(GpuRegister rd, GpuRegister rt, int shamt) {
517   EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3a);
518 }
519 
Dsra(GpuRegister rd,GpuRegister rt,int shamt)520 void Mips64Assembler::Dsra(GpuRegister rd, GpuRegister rt, int shamt) {
521   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3b);
522 }
523 
Dsll32(GpuRegister rd,GpuRegister rt,int shamt)524 void Mips64Assembler::Dsll32(GpuRegister rd, GpuRegister rt, int shamt) {
525   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3c);
526 }
527 
Dsrl32(GpuRegister rd,GpuRegister rt,int shamt)528 void Mips64Assembler::Dsrl32(GpuRegister rd, GpuRegister rt, int shamt) {
529   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3e);
530 }
531 
Drotr32(GpuRegister rd,GpuRegister rt,int shamt)532 void Mips64Assembler::Drotr32(GpuRegister rd, GpuRegister rt, int shamt) {
533   EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3e);
534 }
535 
Dsra32(GpuRegister rd,GpuRegister rt,int shamt)536 void Mips64Assembler::Dsra32(GpuRegister rd, GpuRegister rt, int shamt) {
537   EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3f);
538 }
539 
Dsllv(GpuRegister rd,GpuRegister rt,GpuRegister rs)540 void Mips64Assembler::Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
541   EmitR(0, rs, rt, rd, 0, 0x14);
542 }
543 
Dsrlv(GpuRegister rd,GpuRegister rt,GpuRegister rs)544 void Mips64Assembler::Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
545   EmitR(0, rs, rt, rd, 0, 0x16);
546 }
547 
Drotrv(GpuRegister rd,GpuRegister rt,GpuRegister rs)548 void Mips64Assembler::Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
549   EmitR(0, rs, rt, rd, 1, 0x16);
550 }
551 
Dsrav(GpuRegister rd,GpuRegister rt,GpuRegister rs)552 void Mips64Assembler::Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
553   EmitR(0, rs, rt, rd, 0, 0x17);
554 }
555 
Lb(GpuRegister rt,GpuRegister rs,uint16_t imm16)556 void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
557   EmitI(0x20, rs, rt, imm16);
558 }
559 
Lh(GpuRegister rt,GpuRegister rs,uint16_t imm16)560 void Mips64Assembler::Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
561   EmitI(0x21, rs, rt, imm16);
562 }
563 
Lw(GpuRegister rt,GpuRegister rs,uint16_t imm16)564 void Mips64Assembler::Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
565   EmitI(0x23, rs, rt, imm16);
566 }
567 
Ld(GpuRegister rt,GpuRegister rs,uint16_t imm16)568 void Mips64Assembler::Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
569   EmitI(0x37, rs, rt, imm16);
570 }
571 
Lbu(GpuRegister rt,GpuRegister rs,uint16_t imm16)572 void Mips64Assembler::Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
573   EmitI(0x24, rs, rt, imm16);
574 }
575 
Lhu(GpuRegister rt,GpuRegister rs,uint16_t imm16)576 void Mips64Assembler::Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
577   EmitI(0x25, rs, rt, imm16);
578 }
579 
Lwu(GpuRegister rt,GpuRegister rs,uint16_t imm16)580 void Mips64Assembler::Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
581   EmitI(0x27, rs, rt, imm16);
582 }
583 
Lwpc(GpuRegister rs,uint32_t imm19)584 void Mips64Assembler::Lwpc(GpuRegister rs, uint32_t imm19) {
585   CHECK(IsUint<19>(imm19)) << imm19;
586   EmitI21(0x3B, rs, (0x01 << 19) | imm19);
587 }
588 
Lwupc(GpuRegister rs,uint32_t imm19)589 void Mips64Assembler::Lwupc(GpuRegister rs, uint32_t imm19) {
590   CHECK(IsUint<19>(imm19)) << imm19;
591   EmitI21(0x3B, rs, (0x02 << 19) | imm19);
592 }
593 
Ldpc(GpuRegister rs,uint32_t imm18)594 void Mips64Assembler::Ldpc(GpuRegister rs, uint32_t imm18) {
595   CHECK(IsUint<18>(imm18)) << imm18;
596   EmitI21(0x3B, rs, (0x06 << 18) | imm18);
597 }
598 
Lui(GpuRegister rt,uint16_t imm16)599 void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
600   EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
601 }
602 
Aui(GpuRegister rt,GpuRegister rs,uint16_t imm16)603 void Mips64Assembler::Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
604   EmitI(0xf, rs, rt, imm16);
605 }
606 
Daui(GpuRegister rt,GpuRegister rs,uint16_t imm16)607 void Mips64Assembler::Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
608   CHECK_NE(rs, ZERO);
609   EmitI(0x1d, rs, rt, imm16);
610 }
611 
Dahi(GpuRegister rs,uint16_t imm16)612 void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) {
613   EmitI(1, rs, static_cast<GpuRegister>(6), imm16);
614 }
615 
Dati(GpuRegister rs,uint16_t imm16)616 void Mips64Assembler::Dati(GpuRegister rs, uint16_t imm16) {
617   EmitI(1, rs, static_cast<GpuRegister>(0x1e), imm16);
618 }
619 
Sync(uint32_t stype)620 void Mips64Assembler::Sync(uint32_t stype) {
621   EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
622            static_cast<GpuRegister>(0), stype & 0x1f, 0xf);
623 }
624 
Sb(GpuRegister rt,GpuRegister rs,uint16_t imm16)625 void Mips64Assembler::Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
626   EmitI(0x28, rs, rt, imm16);
627 }
628 
Sh(GpuRegister rt,GpuRegister rs,uint16_t imm16)629 void Mips64Assembler::Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
630   EmitI(0x29, rs, rt, imm16);
631 }
632 
Sw(GpuRegister rt,GpuRegister rs,uint16_t imm16)633 void Mips64Assembler::Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
634   EmitI(0x2b, rs, rt, imm16);
635 }
636 
Sd(GpuRegister rt,GpuRegister rs,uint16_t imm16)637 void Mips64Assembler::Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
638   EmitI(0x3f, rs, rt, imm16);
639 }
640 
Slt(GpuRegister rd,GpuRegister rs,GpuRegister rt)641 void Mips64Assembler::Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
642   EmitR(0, rs, rt, rd, 0, 0x2a);
643 }
644 
Sltu(GpuRegister rd,GpuRegister rs,GpuRegister rt)645 void Mips64Assembler::Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
646   EmitR(0, rs, rt, rd, 0, 0x2b);
647 }
648 
Slti(GpuRegister rt,GpuRegister rs,uint16_t imm16)649 void Mips64Assembler::Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
650   EmitI(0xa, rs, rt, imm16);
651 }
652 
Sltiu(GpuRegister rt,GpuRegister rs,uint16_t imm16)653 void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
654   EmitI(0xb, rs, rt, imm16);
655 }
656 
Seleqz(GpuRegister rd,GpuRegister rs,GpuRegister rt)657 void Mips64Assembler::Seleqz(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
658   EmitR(0, rs, rt, rd, 0, 0x35);
659 }
660 
Selnez(GpuRegister rd,GpuRegister rs,GpuRegister rt)661 void Mips64Assembler::Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
662   EmitR(0, rs, rt, rd, 0, 0x37);
663 }
664 
Clz(GpuRegister rd,GpuRegister rs)665 void Mips64Assembler::Clz(GpuRegister rd, GpuRegister rs) {
666   EmitRsd(0, rs, rd, 0x01, 0x10);
667 }
668 
Clo(GpuRegister rd,GpuRegister rs)669 void Mips64Assembler::Clo(GpuRegister rd, GpuRegister rs) {
670   EmitRsd(0, rs, rd, 0x01, 0x11);
671 }
672 
Dclz(GpuRegister rd,GpuRegister rs)673 void Mips64Assembler::Dclz(GpuRegister rd, GpuRegister rs) {
674   EmitRsd(0, rs, rd, 0x01, 0x12);
675 }
676 
Dclo(GpuRegister rd,GpuRegister rs)677 void Mips64Assembler::Dclo(GpuRegister rd, GpuRegister rs) {
678   EmitRsd(0, rs, rd, 0x01, 0x13);
679 }
680 
Jalr(GpuRegister rd,GpuRegister rs)681 void Mips64Assembler::Jalr(GpuRegister rd, GpuRegister rs) {
682   EmitR(0, rs, static_cast<GpuRegister>(0), rd, 0, 0x09);
683 }
684 
Jalr(GpuRegister rs)685 void Mips64Assembler::Jalr(GpuRegister rs) {
686   Jalr(RA, rs);
687 }
688 
Jr(GpuRegister rs)689 void Mips64Assembler::Jr(GpuRegister rs) {
690   Jalr(ZERO, rs);
691 }
692 
Auipc(GpuRegister rs,uint16_t imm16)693 void Mips64Assembler::Auipc(GpuRegister rs, uint16_t imm16) {
694   EmitI(0x3B, rs, static_cast<GpuRegister>(0x1E), imm16);
695 }
696 
Addiupc(GpuRegister rs,uint32_t imm19)697 void Mips64Assembler::Addiupc(GpuRegister rs, uint32_t imm19) {
698   CHECK(IsUint<19>(imm19)) << imm19;
699   EmitI21(0x3B, rs, imm19);
700 }
701 
// R6 compact (no delay slot) unconditional jumps.

// BC imm26: PC-relative compact branch (opcode 0x32).
void Mips64Assembler::Bc(uint32_t imm26) {
  EmitI26(0x32, imm26);
}

// BALC imm26: compact branch-and-link (opcode 0x3A); RA receives the
// return address.
void Mips64Assembler::Balc(uint32_t imm26) {
  EmitI26(0x3A, imm26);
}

// JIC rt, imm16: compact jump to rt + sign_extend(imm16) (opcode 0x36, rs=0).
void Mips64Assembler::Jic(GpuRegister rt, uint16_t imm16) {
  EmitI(0x36, static_cast<GpuRegister>(0), rt, imm16);
}

// JIALC rt, imm16: compact jump-and-link to rt + sign_extend(imm16)
// (opcode 0x3E, rs=0).
void Mips64Assembler::Jialc(GpuRegister rt, uint16_t imm16) {
  EmitI(0x3E, static_cast<GpuRegister>(0), rt, imm16);
}

// R6 compact conditional branches. Several opcodes are overloaded: the
// specific instruction is selected by which of rs/rt are ZERO or equal, so
// the CHECKs below are not mere sanity checks -- violating them would
// silently encode a *different* instruction.

// BLTC rs, rt: branch if rs < rt (signed). Opcode 0x17 with rs != rt,
// neither ZERO.
void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x17, rs, rt, imm16);
}

// BLTZC rt: branch if rt < 0. Same opcode as BLTC, selected by rs == rt.
void Mips64Assembler::Bltzc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x17, rt, rt, imm16);
}

// BGTZC rt: branch if rt > 0. Same opcode as BLTC, selected by rs == ZERO.
void Mips64Assembler::Bgtzc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x17, static_cast<GpuRegister>(0), rt, imm16);
}

// BGEC rs, rt: branch if rs >= rt (signed). Opcode 0x16 with rs != rt,
// neither ZERO.
void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x16, rs, rt, imm16);
}

// BGEZC rt: branch if rt >= 0. Same opcode as BGEC, selected by rs == rt.
void Mips64Assembler::Bgezc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x16, rt, rt, imm16);
}

// BLEZC rt: branch if rt <= 0. Same opcode as BGEC, selected by rs == ZERO.
void Mips64Assembler::Blezc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x16, static_cast<GpuRegister>(0), rt, imm16);
}

// BLTUC rs, rt: branch if rs < rt (unsigned). Opcode 0x7.
void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x7, rs, rt, imm16);
}

// BGEUC rs, rt: branch if rs >= rt (unsigned). Opcode 0x6.
void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x6, rs, rt, imm16);
}

// BEQC rs, rt: branch if rs == rt. The encoding requires the smaller
// register number first, hence min/max (register order does not affect
// the comparison's result).
void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16);
}

// BNEC rs, rt: branch if rs != rt. Same register-ordering rule as BEQC.
void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16);
}

// BEQZC rs: branch if rs == 0, with a wider 21-bit offset (opcode 0x36).
void Mips64Assembler::Beqzc(GpuRegister rs, uint32_t imm21) {
  CHECK_NE(rs, ZERO);
  EmitI21(0x36, rs, imm21);
}

// BNEZC rs: branch if rs != 0, with a wider 21-bit offset (opcode 0x3E).
void Mips64Assembler::Bnezc(GpuRegister rs, uint32_t imm21) {
  CHECK_NE(rs, ZERO);
  EmitI21(0x3E, rs, imm21);
}

// BC1EQZ ft, imm16: branch if bit 0 of FPR ft is zero (COP1 rs field 0x9).
void Mips64Assembler::Bc1eqz(FpuRegister ft, uint16_t imm16) {
  EmitFI(0x11, 0x9, ft, imm16);
}

// BC1NEZ ft, imm16: branch if bit 0 of FPR ft is non-zero (COP1 rs field 0xD).
void Mips64Assembler::Bc1nez(FpuRegister ft, uint16_t imm16) {
  EmitFI(0x11, 0xD, ft, imm16);
}

// Emits the compact conditional branch instruction matching `cond`.
//
// For two-register conditions both rs and rt are used; for compare-with-zero
// and FP conditions rt must be ZERO (CHECKed) and only rs carries the
// operand -- for kCondF/kCondT, rs actually holds an FPU register number.
// kUncond is not a conditional branch and must never reach this helper.
// The immediate is 16 or 21 bits wide depending on the selected instruction.
void Mips64Assembler::EmitBcondc(BranchCondition cond,
                                 GpuRegister rs,
                                 GpuRegister rt,
                                 uint32_t imm16_21) {
  switch (cond) {
    case kCondLT:
      Bltc(rs, rt, imm16_21);
      break;
    case kCondGE:
      Bgec(rs, rt, imm16_21);
      break;
    case kCondLE:
      // rs <= rt is encoded as rt >= rs.
      Bgec(rt, rs, imm16_21);
      break;
    case kCondGT:
      // rs > rt is encoded as rt < rs.
      Bltc(rt, rs, imm16_21);
      break;
    case kCondLTZ:
      CHECK_EQ(rt, ZERO);
      Bltzc(rs, imm16_21);
      break;
    case kCondGEZ:
      CHECK_EQ(rt, ZERO);
      Bgezc(rs, imm16_21);
      break;
    case kCondLEZ:
      CHECK_EQ(rt, ZERO);
      Blezc(rs, imm16_21);
      break;
    case kCondGTZ:
      CHECK_EQ(rt, ZERO);
      Bgtzc(rs, imm16_21);
      break;
    case kCondEQ:
      Beqc(rs, rt, imm16_21);
      break;
    case kCondNE:
      Bnec(rs, rt, imm16_21);
      break;
    case kCondEQZ:
      CHECK_EQ(rt, ZERO);
      Beqzc(rs, imm16_21);
      break;
    case kCondNEZ:
      CHECK_EQ(rt, ZERO);
      Bnezc(rs, imm16_21);
      break;
    case kCondLTU:
      Bltuc(rs, rt, imm16_21);
      break;
    case kCondGEU:
      Bgeuc(rs, rt, imm16_21);
      break;
    case kCondF:
      CHECK_EQ(rt, ZERO);
      // rs holds the FPU condition register produced by a CMP instruction.
      Bc1eqz(static_cast<FpuRegister>(rs), imm16_21);
      break;
    case kCondT:
      CHECK_EQ(rt, ZERO);
      Bc1nez(static_cast<FpuRegister>(rs), imm16_21);
      break;
    case kUncond:
      LOG(FATAL) << "Unexpected branch condition " << cond;
      UNREACHABLE();
  }
}

// FP arithmetic (COP1). fmt 0x10 = single precision (.S), 0x11 = double
// precision (.D); functions 0x0-0x3 are ADD/SUB/MUL/DIV.

void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);  // ADD.S
}

void Mips64Assembler::SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);  // SUB.S
}

void Mips64Assembler::MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);  // MUL.S
}

void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);  // DIV.S
}

void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x0);  // ADD.D
}

void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x1);  // SUB.D
}

void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x2);  // MUL.D
}

void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x3);  // DIV.D
}

// Unary FP operations (COP1, ft field unused/zero). Functions 0x4-0x7 are
// SQRT/ABS/MOV/NEG; fmt 0x10 = .S, 0x11 = .D.

void Mips64Assembler::SqrtS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x4);  // SQRT.S
}

void Mips64Assembler::SqrtD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x4);  // SQRT.D
}

void Mips64Assembler::AbsS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x5);  // ABS.S
}

void Mips64Assembler::AbsD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x5);  // ABS.D
}

void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);  // MOV.S
}

void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x6);  // MOV.D
}

void Mips64Assembler::NegS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x7);  // NEG.S
}

void Mips64Assembler::NegD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x7);  // NEG.D
}

// FP-to-integer conversions with explicit rounding mode (COP1, unary).
// Suffix L = 64-bit integer result, W = 32-bit integer result.
// Functions: ROUND 0x8/0xc, TRUNC 0x9/0xd, CEIL 0xa/0xe, FLOOR 0xb/0xf
// (L-form / W-form respectively); fmt 0x10 = .S source, 0x11 = .D source.

void Mips64Assembler::RoundLS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x8);  // ROUND.L.S
}

void Mips64Assembler::RoundLD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x8);  // ROUND.L.D
}

void Mips64Assembler::RoundWS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xc);  // ROUND.W.S
}

void Mips64Assembler::RoundWD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xc);  // ROUND.W.D
}

void Mips64Assembler::TruncLS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x9);  // TRUNC.L.S
}

void Mips64Assembler::TruncLD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x9);  // TRUNC.L.D
}

void Mips64Assembler::TruncWS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xd);  // TRUNC.W.S
}

void Mips64Assembler::TruncWD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xd);  // TRUNC.W.D
}

void Mips64Assembler::CeilLS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xa);  // CEIL.L.S
}

void Mips64Assembler::CeilLD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xa);  // CEIL.L.D
}

void Mips64Assembler::CeilWS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xe);  // CEIL.W.S
}

void Mips64Assembler::CeilWD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xe);  // CEIL.W.D
}

void Mips64Assembler::FloorLS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xb);  // FLOOR.L.S
}

void Mips64Assembler::FloorLD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xb);  // FLOOR.L.D
}

void Mips64Assembler::FloorWS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xf);  // FLOOR.W.S
}

void Mips64Assembler::FloorWD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xf);  // FLOOR.W.D
}

// R6 FP select/round/classify/min-max (COP1); fmt 0x10 = .S, 0x11 = .D.

// SEL.fmt: fd = (bit 0 of fd) ? ft : fs.
void Mips64Assembler::SelS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x10);
}

void Mips64Assembler::SelD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x10);
}

// RINT.fmt: round to integral value in FP format (function 0x1a).
void Mips64Assembler::RintS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x1a);
}

void Mips64Assembler::RintD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x1a);
}

// CLASS.fmt: classify the FP value (function 0x1b).
void Mips64Assembler::ClassS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x1b);
}

void Mips64Assembler::ClassD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x1b);
}

// MIN.fmt / MAX.fmt (functions 0x1c / 0x1e).
void Mips64Assembler::MinS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1c);
}

void Mips64Assembler::MinD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x1c);
}

void Mips64Assembler::MaxS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1e);
}

void Mips64Assembler::MaxD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x1e);
}

// R6 FP compare: CMP.cond.fmt writes an all-ones/all-zeros mask into fd
// (consumed by SEL/BC1EQZ/BC1NEZ). fmt 0x14 = single, 0x15 = double.
// Condition codes: UN 0x01, EQ 0x02, UEQ 0x03, LT 0x04, ULT 0x05, LE 0x06,
// ULE 0x07, OR 0x11, UNE 0x12, NE 0x13 ("U" variants are true on unordered).

void Mips64Assembler::CmpUnS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x01);
}

void Mips64Assembler::CmpEqS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x02);
}

void Mips64Assembler::CmpUeqS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x03);
}

void Mips64Assembler::CmpLtS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x04);
}

void Mips64Assembler::CmpUltS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x05);
}

void Mips64Assembler::CmpLeS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x06);
}

void Mips64Assembler::CmpUleS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x07);
}

void Mips64Assembler::CmpOrS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x11);
}

void Mips64Assembler::CmpUneS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x12);
}

void Mips64Assembler::CmpNeS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x14, ft, fs, fd, 0x13);
}

void Mips64Assembler::CmpUnD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x01);
}

void Mips64Assembler::CmpEqD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x02);
}

void Mips64Assembler::CmpUeqD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x03);
}

void Mips64Assembler::CmpLtD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x04);
}

void Mips64Assembler::CmpUltD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x05);
}

void Mips64Assembler::CmpLeD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x06);
}

void Mips64Assembler::CmpUleD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x07);
}

void Mips64Assembler::CmpOrD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x11);
}

void Mips64Assembler::CmpUneD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x12);
}

void Mips64Assembler::CmpNeD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x15, ft, fs, fd, 0x13);
}

// FP format conversions (COP1 CVT). Source format is the fmt field
// (0x10 = .S, 0x11 = .D, 0x14 = .W 32-bit int, 0x15 = .L 64-bit int);
// function 0x20 converts to single, 0x21 to double.

void Mips64Assembler::Cvtsw(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x20);  // CVT.S.W
}

void Mips64Assembler::Cvtdw(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x21);  // CVT.D.W
}

void Mips64Assembler::Cvtsd(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x20);  // CVT.S.D
}

void Mips64Assembler::Cvtds(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21);  // CVT.D.S
}

void Mips64Assembler::Cvtsl(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x20);  // CVT.S.L
}

void Mips64Assembler::Cvtdl(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x21);  // CVT.D.L
}

// GPR <-> FPR moves (COP1; the sub-opcode lives in the rs field, and the
// GPR is carried in the instruction's rt slot, hence the casts).

// MFC1: rt = low 32 bits of fs (sign-extended).
void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

// MFHC1: rt = high 32 bits of fs.
void Mips64Assembler::Mfhc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x03, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

// MTC1: low 32 bits of fs = rt.
void Mips64Assembler::Mtc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x04, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

// MTHC1: high 32 bits of fs = rt.
void Mips64Assembler::Mthc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x07, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

// DMFC1: rt = all 64 bits of fs.
void Mips64Assembler::Dmfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x01, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

// DMTC1: all 64 bits of fs = rt.
void Mips64Assembler::Dmtc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x05, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

// FP load/store: address is rs + sign_extend(imm16); the FPR travels in
// the instruction's rt slot.

// LWC1: load 32-bit word into ft (opcode 0x31).
void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x31, rs, static_cast<GpuRegister>(ft), imm16);
}

// LDC1: load 64-bit doubleword into ft (opcode 0x35).
void Mips64Assembler::Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x35, rs, static_cast<GpuRegister>(ft), imm16);
}

// SWC1: store 32-bit word from ft (opcode 0x39).
void Mips64Assembler::Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x39, rs, static_cast<GpuRegister>(ft), imm16);
}

// SDC1: store 64-bit doubleword from ft (opcode 0x3d).
void Mips64Assembler::Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3d, rs, static_cast<GpuRegister>(ft), imm16);
}

// BREAK: raise a breakpoint exception (SPECIAL, function 0xD, code 0).
void Mips64Assembler::Break() {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0xD);
}

// NOP: encoded as the all-zero instruction (SLL zero, zero, 0).
void Mips64Assembler::Nop() {
  EmitR(0x0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0x0);
}

// Register-move pseudo-instruction: rd = rs, encoded as OR rd, rs, ZERO.
void Mips64Assembler::Move(GpuRegister rd, GpuRegister rs) {
  Or(rd, rs, ZERO);
}

// Zeroing pseudo-instruction: rd = 0.
void Mips64Assembler::Clear(GpuRegister rd) {
  Move(rd, ZERO);
}

// Bitwise-not pseudo-instruction: rd = ~rs, encoded as NOR rd, rs, ZERO.
void Mips64Assembler::Not(GpuRegister rd, GpuRegister rs) {
  Nor(rd, rs, ZERO);
}

// MSA bitwise vector ops (minor opcode 0x1e), whole-register (no data
// format field): operations 0x0-0x3 select AND.V/OR.V/NOR.V/XOR.V.
// All MSA emitters require MSA support (CHECK(HasMsa())).

void Mips64Assembler::AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1e);  // AND.V
}

void Mips64Assembler::OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1e);  // OR.V
}

void Mips64Assembler::NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1e);  // NOR.V
}

void Mips64Assembler::XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1e);  // XOR.V
}

// MSA integer add/subtract (minor opcode 0xe): operation 0x0 = ADDV,
// 0x1 = SUBV; the df (data format) field 0-3 selects element size
// B (byte), H (halfword), W (word), D (doubleword).

void Mips64Assembler::AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xe);  // ADDV.B
}

void Mips64Assembler::AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xe);  // ADDV.H
}

void Mips64Assembler::AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xe);  // ADDV.W
}

void Mips64Assembler::AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xe);  // ADDV.D
}

void Mips64Assembler::SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xe);  // SUBV.B
}

void Mips64Assembler::SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xe);  // SUBV.H
}

void Mips64Assembler::SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xe);  // SUBV.W
}

void Mips64Assembler::SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xe);  // SUBV.D
}

// MSA integer multiply MULV.df (minor opcode 0x12, operation 0x0);
// df 0-3 = B/H/W/D element size.

void Mips64Assembler::MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x12);  // MULV.B
}

void Mips64Assembler::MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x12);  // MULV.H
}

void Mips64Assembler::MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x12);  // MULV.W
}

void Mips64Assembler::MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x12);  // MULV.D
}

// MSA integer division (minor opcode 0x12): operation 0x4 = DIV_S (signed),
// 0x5 = DIV_U (unsigned); df 0-3 = B/H/W/D element size.

void Mips64Assembler::Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x12);  // DIV_S.B
}

void Mips64Assembler::Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x12);  // DIV_S.H
}

void Mips64Assembler::Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x12);  // DIV_S.W
}

void Mips64Assembler::Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x12);  // DIV_S.D
}

void Mips64Assembler::Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x12);  // DIV_U.B
}

void Mips64Assembler::Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x12);  // DIV_U.H
}

void Mips64Assembler::Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x12);  // DIV_U.W
}

void Mips64Assembler::Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x12);  // DIV_U.D
}

// MSA integer modulo (minor opcode 0x12): operation 0x6 = MOD_S (signed),
// 0x7 = MOD_U (unsigned); df 0-3 = B/H/W/D element size.

void Mips64Assembler::Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x12);  // MOD_S.B
}

void Mips64Assembler::Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x12);  // MOD_S.H
}

void Mips64Assembler::Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x12);  // MOD_S.W
}

void Mips64Assembler::Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x12);  // MOD_S.D
}

void Mips64Assembler::Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x12);  // MOD_U.B
}

void Mips64Assembler::Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x12);  // MOD_U.H
}

void Mips64Assembler::Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x12);  // MOD_U.W
}

void Mips64Assembler::Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x12);  // MOD_U.D
}

// MSA ADD_A.df: add absolute values (minor opcode 0x10, operation 0x0);
// df 0-3 = B/H/W/D element size.

void Mips64Assembler::Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x10);  // ADD_A.B
}

void Mips64Assembler::Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x10);  // ADD_A.H
}

void Mips64Assembler::Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x10);  // ADD_A.W
}

void Mips64Assembler::Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x10);  // ADD_A.D
}

// MSA average-truncated (minor opcode 0x10): operation 0x4 = AVE_S (signed),
// 0x5 = AVE_U (unsigned); df 0-3 = B/H/W/D element size.

void Mips64Assembler::Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x10);  // AVE_S.B
}

void Mips64Assembler::Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x10);  // AVE_S.H
}

void Mips64Assembler::Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x10);  // AVE_S.W
}

void Mips64Assembler::Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x10);  // AVE_S.D
}

void Mips64Assembler::Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x10);  // AVE_U.B
}

void Mips64Assembler::Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x10);  // AVE_U.H
}

void Mips64Assembler::Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x10);  // AVE_U.W
}

void Mips64Assembler::Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x10);  // AVE_U.D
}

// MSA average-rounded (minor opcode 0x10): operation 0x6 = AVER_S (signed),
// 0x7 = AVER_U (unsigned); df 0-3 = B/H/W/D element size.

void Mips64Assembler::Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x10);  // AVER_S.B
}

void Mips64Assembler::Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x10);  // AVER_S.H
}

void Mips64Assembler::Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x10);  // AVER_S.W
}

void Mips64Assembler::Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x10);  // AVER_S.D
}

void Mips64Assembler::Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x10);  // AVER_U.B
}

void Mips64Assembler::Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x10);  // AVER_U.H
}

void Mips64Assembler::Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x10);  // AVER_U.W
}

void Mips64Assembler::Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x10);  // AVER_U.D
}

// MAX_S.df: per-element signed maximum (B/H/W/D variants; minor opcode 0xe).
void Mips64Assembler::Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xe);
}

void Mips64Assembler::Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xe);
}

void Mips64Assembler::Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xe);
}

void Mips64Assembler::Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xe);
}
1478 
// MAX_U.df: per-element unsigned maximum (B/H/W/D variants).
void Mips64Assembler::Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x3, 0x0, wt, ws, wd, 0xe);
}

void Mips64Assembler::Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x3, 0x1, wt, ws, wd, 0xe);
}

void Mips64Assembler::Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x3, 0x2, wt, ws, wd, 0xe);
}

void Mips64Assembler::Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x3, 0x3, wt, ws, wd, 0xe);
}
1498 
// MIN_S.df: per-element signed minimum (B/H/W/D variants).
void Mips64Assembler::Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x0, wt, ws, wd, 0xe);
}

void Mips64Assembler::Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0xe);
}

void Mips64Assembler::Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0xe);
}

void Mips64Assembler::Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0xe);
}
1518 
// MIN_U.df: per-element unsigned minimum (B/H/W/D variants).
void Mips64Assembler::Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0xe);
}

void Mips64Assembler::Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0xe);
}

void Mips64Assembler::Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0xe);
}

void Mips64Assembler::Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0xe);
}
1538 
// FADD.df / FSUB.df: per-element floating-point add/subtract.
// For FP ops df 0x0 = word (float) and 0x1 = double; minor opcode 0x1b.
void Mips64Assembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1b);
}
1558 
// FMUL.df / FDIV.df: per-element floating-point multiply/divide (W = float, D = double).
void Mips64Assembler::FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x1b);
}
1578 
// FMAX.df / FMIN.df: per-element floating-point maximum/minimum (W = float, D = double).
void Mips64Assembler::FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x1b);
}

void Mips64Assembler::FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x1b);
}
1598 
// FFINT_S.df / FTINT_S.df: per-element conversion between signed integers and
// floating point (W = 32-bit, D = 64-bit). These use the two-register
// floating-point format: EmitMsa2RF(operation, df, ws, wd, minor opcode).
void Mips64Assembler::Ffint_sW(VectorRegister wd, VectorRegister ws) {
  CHECK(HasMsa());
  EmitMsa2RF(0x19e, 0x0, ws, wd, 0x1e);
}

void Mips64Assembler::Ffint_sD(VectorRegister wd, VectorRegister ws) {
  CHECK(HasMsa());
  EmitMsa2RF(0x19e, 0x1, ws, wd, 0x1e);
}

void Mips64Assembler::Ftint_sW(VectorRegister wd, VectorRegister ws) {
  CHECK(HasMsa());
  EmitMsa2RF(0x19c, 0x0, ws, wd, 0x1e);
}

void Mips64Assembler::Ftint_sD(VectorRegister wd, VectorRegister ws) {
  CHECK(HasMsa());
  EmitMsa2RF(0x19c, 0x1, ws, wd, 0x1e);
}
1618 
// SLL.df: per-element logical left shift by the amounts in wt (B/H/W/D variants).
void Mips64Assembler::SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xd);
}

void Mips64Assembler::SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xd);
}

void Mips64Assembler::SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xd);
}

void Mips64Assembler::SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xd);
}
1638 
// SRA.df: per-element arithmetic right shift by the amounts in wt (B/H/W/D variants).
void Mips64Assembler::SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xd);
}

void Mips64Assembler::SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xd);
}

void Mips64Assembler::SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xd);
}

void Mips64Assembler::SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xd);
}
1658 
// SRL.df: per-element logical right shift by the amounts in wt (B/H/W/D variants).
void Mips64Assembler::SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xd);
}

void Mips64Assembler::SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xd);
}

void Mips64Assembler::SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xd);
}

void Mips64Assembler::SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xd);
}
1678 
// SLLI.df: per-element logical left shift by an immediate. The element width
// determines the legal shift range (3/4/5/6 bits for B/H/W/D), and the
// kMsaDfM*Mask constant OR'ed with the shift amount encodes the data format
// in the combined df/m field of the BIT instruction format.
void Mips64Assembler::SlliB(VectorRegister wd, VectorRegister ws, int shamt3) {
  CHECK(HasMsa());
  CHECK(IsUint<3>(shamt3)) << shamt3;  // Byte elements: shift 0..7.
  EmitMsaBIT(0x0, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
}

void Mips64Assembler::SlliH(VectorRegister wd, VectorRegister ws, int shamt4) {
  CHECK(HasMsa());
  CHECK(IsUint<4>(shamt4)) << shamt4;  // Halfword elements: shift 0..15.
  EmitMsaBIT(0x0, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
}

void Mips64Assembler::SlliW(VectorRegister wd, VectorRegister ws, int shamt5) {
  CHECK(HasMsa());
  CHECK(IsUint<5>(shamt5)) << shamt5;  // Word elements: shift 0..31.
  EmitMsaBIT(0x0, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
}

void Mips64Assembler::SlliD(VectorRegister wd, VectorRegister ws, int shamt6) {
  CHECK(HasMsa());
  CHECK(IsUint<6>(shamt6)) << shamt6;  // Doubleword elements: shift 0..63.
  EmitMsaBIT(0x0, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
}
1702 
// SRAI.df: per-element arithmetic right shift by an immediate (see SLLI.df
// above for the shift-range/data-format encoding convention).
void Mips64Assembler::SraiB(VectorRegister wd, VectorRegister ws, int shamt3) {
  CHECK(HasMsa());
  CHECK(IsUint<3>(shamt3)) << shamt3;
  EmitMsaBIT(0x1, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
}

void Mips64Assembler::SraiH(VectorRegister wd, VectorRegister ws, int shamt4) {
  CHECK(HasMsa());
  CHECK(IsUint<4>(shamt4)) << shamt4;
  EmitMsaBIT(0x1, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
}

void Mips64Assembler::SraiW(VectorRegister wd, VectorRegister ws, int shamt5) {
  CHECK(HasMsa());
  CHECK(IsUint<5>(shamt5)) << shamt5;
  EmitMsaBIT(0x1, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
}

void Mips64Assembler::SraiD(VectorRegister wd, VectorRegister ws, int shamt6) {
  CHECK(HasMsa());
  CHECK(IsUint<6>(shamt6)) << shamt6;
  EmitMsaBIT(0x1, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
}
1726 
// SRLI.df: per-element logical right shift by an immediate (same encoding
// convention as SLLI.df/SRAI.df).
void Mips64Assembler::SrliB(VectorRegister wd, VectorRegister ws, int shamt3) {
  CHECK(HasMsa());
  CHECK(IsUint<3>(shamt3)) << shamt3;
  EmitMsaBIT(0x2, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
}

void Mips64Assembler::SrliH(VectorRegister wd, VectorRegister ws, int shamt4) {
  CHECK(HasMsa());
  CHECK(IsUint<4>(shamt4)) << shamt4;
  EmitMsaBIT(0x2, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
}

void Mips64Assembler::SrliW(VectorRegister wd, VectorRegister ws, int shamt5) {
  CHECK(HasMsa());
  CHECK(IsUint<5>(shamt5)) << shamt5;
  EmitMsaBIT(0x2, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
}

void Mips64Assembler::SrliD(VectorRegister wd, VectorRegister ws, int shamt6) {
  CHECK(HasMsa());
  CHECK(IsUint<6>(shamt6)) << shamt6;
  EmitMsaBIT(0x2, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
}
1750 
// MOVE.V: copy the full 128-bit contents of vector register ws into wd.
void Mips64Assembler::MoveV(VectorRegister wd, VectorRegister ws) {
  CHECK(HasMsa());
  EmitMsaBIT(0x1, 0x3e, ws, wd, 0x19);
}
1755 
// SPLATI.df: replicate element n of ws across all elements of wd. The valid
// element index narrows as the element widens (4/3/2/1 bits for B/H/W/D);
// the kMsaDfN*Mask constant encodes the data format alongside the index.
void Mips64Assembler::SplatiB(VectorRegister wd, VectorRegister ws, int n4) {
  CHECK(HasMsa());
  CHECK(IsUint<4>(n4)) << n4;  // 16 byte elements: index 0..15.
  EmitMsaELM(0x1, n4 | kMsaDfNByteMask, ws, wd, 0x19);
}

void Mips64Assembler::SplatiH(VectorRegister wd, VectorRegister ws, int n3) {
  CHECK(HasMsa());
  CHECK(IsUint<3>(n3)) << n3;  // 8 halfword elements: index 0..7.
  EmitMsaELM(0x1, n3 | kMsaDfNHalfwordMask, ws, wd, 0x19);
}

void Mips64Assembler::SplatiW(VectorRegister wd, VectorRegister ws, int n2) {
  CHECK(HasMsa());
  CHECK(IsUint<2>(n2)) << n2;  // 4 word elements: index 0..3.
  EmitMsaELM(0x1, n2 | kMsaDfNWordMask, ws, wd, 0x19);
}

void Mips64Assembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
  CHECK(HasMsa());
  CHECK(IsUint<1>(n1)) << n1;  // 2 doubleword elements: index 0..1.
  EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19);
}
1779 
// FILL.df: replicate GPR rs into every element of wd (B/H/W/D variants).
// The GPR is cast to VectorRegister only to fit the shared 2R emitter; the
// encoded field is still the general-purpose register number.
void Mips64Assembler::FillB(VectorRegister wd, GpuRegister rs) {
  CHECK(HasMsa());
  EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e);
}

void Mips64Assembler::FillH(VectorRegister wd, GpuRegister rs) {
  CHECK(HasMsa());
  EmitMsa2R(0xc0, 0x1, static_cast<VectorRegister>(rs), wd, 0x1e);
}

void Mips64Assembler::FillW(VectorRegister wd, GpuRegister rs) {
  CHECK(HasMsa());
  EmitMsa2R(0xc0, 0x2, static_cast<VectorRegister>(rs), wd, 0x1e);
}

void Mips64Assembler::FillD(VectorRegister wd, GpuRegister rs) {
  CHECK(HasMsa());
  EmitMsa2R(0xc0, 0x3, static_cast<VectorRegister>(rs), wd, 0x1e);
}
1799 
// LDI.df: load a signed immediate into every element of wd. The instruction
// carries a 10-bit immediate field (masked with kMsaS10Mask); the byte
// variant additionally restricts the value to 8 bits so it fits an element.
void Mips64Assembler::LdiB(VectorRegister wd, int imm8) {
  CHECK(HasMsa());
  CHECK(IsInt<8>(imm8)) << imm8;  // Must fit in a byte element.
  EmitMsaI10(0x6, 0x0, imm8 & kMsaS10Mask, wd, 0x7);
}

void Mips64Assembler::LdiH(VectorRegister wd, int imm10) {
  CHECK(HasMsa());
  CHECK(IsInt<10>(imm10)) << imm10;
  EmitMsaI10(0x6, 0x1, imm10 & kMsaS10Mask, wd, 0x7);
}

void Mips64Assembler::LdiW(VectorRegister wd, int imm10) {
  CHECK(HasMsa());
  CHECK(IsInt<10>(imm10)) << imm10;
  EmitMsaI10(0x6, 0x2, imm10 & kMsaS10Mask, wd, 0x7);
}

void Mips64Assembler::LdiD(VectorRegister wd, int imm10) {
  CHECK(HasMsa());
  CHECK(IsInt<10>(imm10)) << imm10;
  EmitMsaI10(0x6, 0x3, imm10 & kMsaS10Mask, wd, 0x7);
}
1823 
// LD.df: 128-bit vector load from rs + offset. The hardware offset field is a
// signed 10-bit value scaled by the element size, so the byte-offset accepted
// here grows by one bit per width (10/11/12/13 bits for B/H/W/D) and must be
// aligned to the element size before being shifted down for encoding.
void Mips64Assembler::LdB(VectorRegister wd, GpuRegister rs, int offset) {
  CHECK(HasMsa());
  CHECK(IsInt<10>(offset)) << offset;
  EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x8, 0x0);
}

void Mips64Assembler::LdH(VectorRegister wd, GpuRegister rs, int offset) {
  CHECK(HasMsa());
  CHECK(IsInt<11>(offset)) << offset;
  CHECK_ALIGNED(offset, kMips64HalfwordSize);
  EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x8, 0x1);
}

void Mips64Assembler::LdW(VectorRegister wd, GpuRegister rs, int offset) {
  CHECK(HasMsa());
  CHECK(IsInt<12>(offset)) << offset;
  CHECK_ALIGNED(offset, kMips64WordSize);
  EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x8, 0x2);
}

void Mips64Assembler::LdD(VectorRegister wd, GpuRegister rs, int offset) {
  CHECK(HasMsa());
  CHECK(IsInt<13>(offset)) << offset;
  CHECK_ALIGNED(offset, kMips64DoublewordSize);
  EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x8, 0x3);
}
1850 
// ST.df: 128-bit vector store to rs + offset. Offset encoding mirrors LD.df:
// signed 10-bit field scaled by element size, with matching range/alignment
// checks per element width.
void Mips64Assembler::StB(VectorRegister wd, GpuRegister rs, int offset) {
  CHECK(HasMsa());
  CHECK(IsInt<10>(offset)) << offset;
  EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x9, 0x0);
}

void Mips64Assembler::StH(VectorRegister wd, GpuRegister rs, int offset) {
  CHECK(HasMsa());
  CHECK(IsInt<11>(offset)) << offset;
  CHECK_ALIGNED(offset, kMips64HalfwordSize);
  EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x9, 0x1);
}

void Mips64Assembler::StW(VectorRegister wd, GpuRegister rs, int offset) {
  CHECK(HasMsa());
  CHECK(IsInt<12>(offset)) << offset;
  CHECK_ALIGNED(offset, kMips64WordSize);
  EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x9, 0x2);
}

void Mips64Assembler::StD(VectorRegister wd, GpuRegister rs, int offset) {
  CHECK(HasMsa());
  CHECK(IsInt<13>(offset)) << offset;
  CHECK_ALIGNED(offset, kMips64DoublewordSize);
  EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3);
}
1877 
// ILVR.df: interleave the right (low-order) halves of ws and wt into wd
// (B/H/W/D variants).
void Mips64Assembler::IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x14);
}

void Mips64Assembler::IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x14);
}

void Mips64Assembler::IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x14);
}

void Mips64Assembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
  CHECK(HasMsa());
  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x14);
}
1897 
ReplicateFPToVectorRegister(VectorRegister dst,FpuRegister src,bool is_double)1898 void Mips64Assembler::ReplicateFPToVectorRegister(VectorRegister dst,
1899                                                   FpuRegister src,
1900                                                   bool is_double) {
1901   // Float or double in FPU register Fx can be considered as 0th element in vector register Wx.
1902   if (is_double) {
1903     SplatiD(dst, static_cast<VectorRegister>(src), 0);
1904   } else {
1905     SplatiW(dst, static_cast<VectorRegister>(src), 0);
1906   }
1907 }
1908 
// Materializes a 32-bit constant into rd; the instruction selection lives in
// the shared TemplateLoadConst32 helper (used by both the assembler and tests).
void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) {
  TemplateLoadConst32(this, rd, value);
}
1912 
1913 // This function is only used for testing purposes.
// Intentionally empty hook: tests override/observe this to record which code
// path TemplateLoadConst64 chose; the real assembler does nothing here.
void Mips64Assembler::RecordLoadConst64Path(int value ATTRIBUTE_UNUSED) {
}
1916 
// Materializes a 64-bit constant into rd via the shared TemplateLoadConst64
// helper, which picks the shortest available instruction sequence.
void Mips64Assembler::LoadConst64(GpuRegister rd, int64_t value) {
  TemplateLoadConst64(this, rd, value);
}
1920 
// rt = rs + value for an arbitrary 32-bit immediate. Uses a single ADDIU when
// the value fits in 16 signed bits; otherwise splits it into AUI (upper 16)
// plus an optional ADDIU (lower 16).
void Mips64Assembler::Addiu32(GpuRegister rt, GpuRegister rs, int32_t value) {
  if (IsInt<16>(value)) {
    Addiu(rt, rs, value);
  } else {
    int16_t high = High16Bits(value);
    int16_t low = Low16Bits(value);
    // ADDIU sign-extends `low`; when it is negative it effectively subtracts
    // 0x10000, so pre-increment `high` to compensate.
    high += (low < 0) ? 1 : 0;  // Account for sign extension in addiu.
    Aui(rt, rs, high);
    if (low != 0) {  // Skip the second instruction when it would be a no-op.
      Addiu(rt, rt, low);
    }
  }
}
1934 
1935 // TODO: don't use rtmp, use daui, dahi, dati.
// rt = rs + value for an arbitrary 64-bit immediate. A single DADDIU suffices
// for 16-bit values; otherwise the constant is materialized into the scratch
// register rtmp and added. rtmp must differ from rs so loading the constant
// does not clobber the addend.
// TODO: don't use rtmp, use daui, dahi, dati.
void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
  CHECK_NE(rs, rtmp);
  if (IsInt<16>(value)) {
    Daddiu(rt, rs, value);
  } else {
    LoadConst64(rtmp, value);
    Daddu(rt, rs, rtmp);
  }
}
1945 
// Picks the short branch encoding when the needed offset fits in the short
// form's offset field, otherwise falls back to the long form.
void Mips64Assembler::Branch::InitShortOrLong(Mips64Assembler::Branch::OffsetBits offset_size,
                                              Mips64Assembler::Branch::Type short_type,
                                              Mips64Assembler::Branch::Type long_type) {
  type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
}
1951 
// Determines the initial concrete branch type from the requested category and
// the (possibly still unknown) distance to the target, and remembers it in
// old_type_ as the baseline for later promotion accounting.
void Mips64Assembler::Branch::InitializeType(Type initial_type) {
  OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_);
  switch (initial_type) {
    case kLabel:
    case kLiteral:
    case kLiteralUnsigned:
    case kLiteralLong:
      // Label/literal loads are created before their target is known; keep
      // the requested type and let PromoteBranches() widen it if needed.
      CHECK(!IsResolved());
      type_ = initial_type;
      break;
    case kCall:
      InitShortOrLong(offset_size, kCall, kLongCall);
      break;
    case kCondBranch:
      switch (condition_) {
        case kUncond:
          InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
          break;
        case kCondEQZ:
        case kCondNEZ:
          // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
          type_ = (offset_size <= kOffset23) ? kCondBranch : kLongCondBranch;
          break;
        default:
          InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
          break;
      }
      break;
    default:
      LOG(FATAL) << "Unexpected branch type " << initial_type;
      UNREACHABLE();
  }
  old_type_ = type_;
}
1986 
IsNop(BranchCondition condition,GpuRegister lhs,GpuRegister rhs)1987 bool Mips64Assembler::Branch::IsNop(BranchCondition condition, GpuRegister lhs, GpuRegister rhs) {
1988   switch (condition) {
1989     case kCondLT:
1990     case kCondGT:
1991     case kCondNE:
1992     case kCondLTU:
1993       return lhs == rhs;
1994     default:
1995       return false;
1996   }
1997 }
1998 
IsUncond(BranchCondition condition,GpuRegister lhs,GpuRegister rhs)1999 bool Mips64Assembler::Branch::IsUncond(BranchCondition condition,
2000                                        GpuRegister lhs,
2001                                        GpuRegister rhs) {
2002   switch (condition) {
2003     case kUncond:
2004       return true;
2005     case kCondGE:
2006     case kCondLE:
2007     case kCondEQ:
2008     case kCondGEU:
2009       return lhs == rhs;
2010     default:
2011       return false;
2012   }
2013 }
2014 
// Constructs an unconditional branch or a call to `target` from `location`
// (both are buffer offsets). Register fields are unused and set to ZERO.
Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, bool is_call)
    : old_location_(location),
      location_(location),
      target_(target),
      lhs_reg_(ZERO),
      rhs_reg_(ZERO),
      condition_(kUncond) {
  InitializeType(is_call ? kCall : kCondBranch);
}
2024 
// Constructs a conditional branch. Validates the register operands against
// the arity of the condition (two-register compares, compare-against-zero,
// FP-condition tests), then degenerates always-true conditions to kUncond.
// Always-false conditions (nops) must be filtered out by the caller and are
// rejected here with a CHECK.
Mips64Assembler::Branch::Branch(uint32_t location,
                                uint32_t target,
                                Mips64Assembler::BranchCondition condition,
                                GpuRegister lhs_reg,
                                GpuRegister rhs_reg)
    : old_location_(location),
      location_(location),
      target_(target),
      lhs_reg_(lhs_reg),
      rhs_reg_(rhs_reg),
      condition_(condition) {
  CHECK_NE(condition, kUncond);
  switch (condition) {
    // Two-register compares: both operands must be real registers.
    case kCondEQ:
    case kCondNE:
    case kCondLT:
    case kCondGE:
    case kCondLE:
    case kCondGT:
    case kCondLTU:
    case kCondGEU:
      CHECK_NE(lhs_reg, ZERO);
      CHECK_NE(rhs_reg, ZERO);
      break;
    // Compare-against-zero: only the left operand is a real register.
    case kCondLTZ:
    case kCondGEZ:
    case kCondLEZ:
    case kCondGTZ:
    case kCondEQZ:
    case kCondNEZ:
      CHECK_NE(lhs_reg, ZERO);
      CHECK_EQ(rhs_reg, ZERO);
      break;
    // FP condition tests: lhs_reg holds the FP condition register number.
    case kCondF:
    case kCondT:
      CHECK_EQ(rhs_reg, ZERO);
      break;
    case kUncond:
      UNREACHABLE();
  }
  CHECK(!IsNop(condition, lhs_reg, rhs_reg));
  if (IsUncond(condition, lhs_reg, rhs_reg)) {
    // Branch condition is always true, make the branch unconditional.
    condition_ = kUncond;
  }
  InitializeType(kCondBranch);
}
2072 
// Constructs a label-address or literal load "branch": a PC-relative load of
// an address or constant into dest_reg. The target is not known yet
// (kUnresolved); label_or_literal_type selects kLabel/kLiteral/etc.
Mips64Assembler::Branch::Branch(uint32_t location, GpuRegister dest_reg, Type label_or_literal_type)
    : old_location_(location),
      location_(location),
      target_(kUnresolved),
      lhs_reg_(dest_reg),
      rhs_reg_(ZERO),
      condition_(kUncond) {
  CHECK_NE(dest_reg, ZERO);
  InitializeType(label_or_literal_type);
}
2083 
// Returns the logical negation of `cond`, used when a short branch is
// rewritten as a branch-around-jump (branch on the opposite condition over an
// unconditional jump). kUncond has no opposite and is a fatal error.
Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
    Mips64Assembler::BranchCondition cond) {
  switch (cond) {
    case kCondLT:
      return kCondGE;
    case kCondGE:
      return kCondLT;
    case kCondLE:
      return kCondGT;
    case kCondGT:
      return kCondLE;
    case kCondLTZ:
      return kCondGEZ;
    case kCondGEZ:
      return kCondLTZ;
    case kCondLEZ:
      return kCondGTZ;
    case kCondGTZ:
      return kCondLEZ;
    case kCondEQ:
      return kCondNE;
    case kCondNE:
      return kCondEQ;
    case kCondEQZ:
      return kCondNEZ;
    case kCondNEZ:
      return kCondEQZ;
    case kCondLTU:
      return kCondGEU;
    case kCondGEU:
      return kCondLTU;
    case kCondF:
      return kCondT;
    case kCondT:
      return kCondF;
    case kUncond:
      LOG(FATAL) << "Unexpected branch condition " << cond;
  }
  UNREACHABLE();
}
2124 
// Trivial accessors for the branch's current and original (pre-promotion)
// geometry. "Old" values refer to the state before branch promotion moved or
// widened the branch; lengths are in instructions, sizes in bytes.
Mips64Assembler::Branch::Type Mips64Assembler::Branch::GetType() const {
  return type_;
}

Mips64Assembler::BranchCondition Mips64Assembler::Branch::GetCondition() const {
  return condition_;
}

GpuRegister Mips64Assembler::Branch::GetLeftRegister() const {
  return lhs_reg_;
}

GpuRegister Mips64Assembler::Branch::GetRightRegister() const {
  return rhs_reg_;
}

uint32_t Mips64Assembler::Branch::GetTarget() const {
  return target_;
}

uint32_t Mips64Assembler::Branch::GetLocation() const {
  return location_;
}

uint32_t Mips64Assembler::Branch::GetOldLocation() const {
  return old_location_;
}

// Length in 32-bit instructions for the current encoding.
uint32_t Mips64Assembler::Branch::GetLength() const {
  return branch_info_[type_].length;
}

uint32_t Mips64Assembler::Branch::GetOldLength() const {
  return branch_info_[old_type_].length;
}

// Size in bytes (length scaled by the 4-byte instruction size).
uint32_t Mips64Assembler::Branch::GetSize() const {
  return GetLength() * sizeof(uint32_t);
}

uint32_t Mips64Assembler::Branch::GetOldSize() const {
  return GetOldLength() * sizeof(uint32_t);
}

uint32_t Mips64Assembler::Branch::GetEndLocation() const {
  return GetLocation() + GetSize();
}

uint32_t Mips64Assembler::Branch::GetOldEndLocation() const {
  return GetOldLocation() + GetOldSize();
}
2176 
// True when the branch currently uses one of the long (multi-instruction)
// encodings; every Type enumerator is listed so a new one triggers a compiler
// warning here.
bool Mips64Assembler::Branch::IsLong() const {
  switch (type_) {
    // Short branches.
    case kUncondBranch:
    case kCondBranch:
    case kCall:
    // Near label.
    case kLabel:
    // Near literals.
    case kLiteral:
    case kLiteralUnsigned:
    case kLiteralLong:
      return false;
    // Long branches.
    case kLongUncondBranch:
    case kLongCondBranch:
    case kLongCall:
    // Far label.
    case kFarLabel:
    // Far literals.
    case kFarLiteral:
    case kFarLiteralUnsigned:
    case kFarLiteralLong:
      return true;
  }
  UNREACHABLE();
}
2204 
// A branch is resolved once its target offset is known (bound label).
bool Mips64Assembler::Branch::IsResolved() const {
  return target_ != kUnresolved;
}
2208 
// Offset-field width of the current encoding. beqzc/bnezc short conditional
// branches get a wider 23-bit field than the table entry for kCondBranch.
Mips64Assembler::Branch::OffsetBits Mips64Assembler::Branch::GetOffsetSize() const {
  OffsetBits offset_size =
      (type_ == kCondBranch && (condition_ == kCondEQZ || condition_ == kCondNEZ))
          ? kOffset23
          : branch_info_[type_].offset_size;
  return offset_size;
}
2216 
GetOffsetSizeNeeded(uint32_t location,uint32_t target)2217 Mips64Assembler::Branch::OffsetBits Mips64Assembler::Branch::GetOffsetSizeNeeded(uint32_t location,
2218                                                                                  uint32_t target) {
2219   // For unresolved targets assume the shortest encoding
2220   // (later it will be made longer if needed).
2221   if (target == kUnresolved)
2222     return kOffset16;
2223   int64_t distance = static_cast<int64_t>(target) - location;
2224   // To simplify calculations in composite branches consisting of multiple instructions
2225   // bump up the distance by a value larger than the max byte size of a composite branch.
2226   distance += (distance >= 0) ? kMaxBranchSize : -kMaxBranchSize;
2227   if (IsInt<kOffset16>(distance))
2228     return kOffset16;
2229   else if (IsInt<kOffset18>(distance))
2230     return kOffset18;
2231   else if (IsInt<kOffset21>(distance))
2232     return kOffset21;
2233   else if (IsInt<kOffset23>(distance))
2234     return kOffset23;
2235   else if (IsInt<kOffset28>(distance))
2236     return kOffset28;
2237   return kOffset32;
2238 }
2239 
// Records the (now known) target address of the branch.
void Mips64Assembler::Branch::Resolve(uint32_t target) {
  target_ = target;
}
2243 
Relocate(uint32_t expand_location,uint32_t delta)2244 void Mips64Assembler::Branch::Relocate(uint32_t expand_location, uint32_t delta) {
2245   if (location_ > expand_location) {
2246     location_ += delta;
2247   }
2248   if (!IsResolved()) {
2249     return;  // Don't know the target yet.
2250   }
2251   if (target_ > expand_location) {
2252     target_ += delta;
2253   }
2254 }
2255 
// Switches the branch to the corresponding long (32-bit offset) encoding.
// No-op if the branch is already long.
void Mips64Assembler::Branch::PromoteToLong() {
  switch (type_) {
    // Short branches.
    case kUncondBranch:
      type_ = kLongUncondBranch;
      break;
    case kCondBranch:
      type_ = kLongCondBranch;
      break;
    case kCall:
      type_ = kLongCall;
      break;
    // Near label.
    case kLabel:
      type_ = kFarLabel;
      break;
    // Near literals.
    case kLiteral:
      type_ = kFarLiteral;
      break;
    case kLiteralUnsigned:
      type_ = kFarLiteralUnsigned;
      break;
    case kLiteralLong:
      type_ = kFarLiteralLong;
      break;
    default:
      // Note: 'type_' is already long.
      break;
  }
  CHECK(IsLong());
}
2288 
PromoteIfNeeded(uint32_t max_short_distance)2289 uint32_t Mips64Assembler::Branch::PromoteIfNeeded(uint32_t max_short_distance) {
2290   // If the branch is still unresolved or already long, nothing to do.
2291   if (IsLong() || !IsResolved()) {
2292     return 0;
2293   }
2294   // Promote the short branch to long if the offset size is too small
2295   // to hold the distance between location_ and target_.
2296   if (GetOffsetSizeNeeded(location_, target_) > GetOffsetSize()) {
2297     PromoteToLong();
2298     uint32_t old_size = GetOldSize();
2299     uint32_t new_size = GetSize();
2300     CHECK_GT(new_size, old_size);
2301     return new_size - old_size;
2302   }
2303   // The following logic is for debugging/testing purposes.
2304   // Promote some short branches to long when it's not really required.
2305   if (UNLIKELY(max_short_distance != std::numeric_limits<uint32_t>::max())) {
2306     int64_t distance = static_cast<int64_t>(target_) - location_;
2307     distance = (distance >= 0) ? distance : -distance;
2308     if (distance >= max_short_distance) {
2309       PromoteToLong();
2310       uint32_t old_size = GetOldSize();
2311       uint32_t new_size = GetSize();
2312       CHECK_GT(new_size, old_size);
2313       return new_size - old_size;
2314     }
2315   }
2316   return 0;
2317 }
2318 
// Location of the instruction within the branch that carries the offset
// (instr_offset instructions past the branch start; see branch_info_[]).
uint32_t Mips64Assembler::Branch::GetOffsetLocation() const {
  return location_ + branch_info_[type_].instr_offset * sizeof(uint32_t);
}
2322 
// Computes the offset operand, masked and shifted as required for encoding
// into the branch instruction(s). Requires a resolved target.
uint32_t Mips64Assembler::Branch::GetOffset() const {
  CHECK(IsResolved());
  uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
  // Calculate the byte distance between instructions and also account for
  // different PC-relative origins.
  uint32_t offset_location = GetOffsetLocation();
  if (type_ == kLiteralLong) {
    // Special case for the ldpc instruction, whose address (PC) is rounded down to
    // a multiple of 8 before adding the offset.
    // Note, branch promotion has already taken care of aligning `target_` to an
    // address that's a multiple of 8.
    offset_location = RoundDown(offset_location, sizeof(uint64_t));
  }
  uint32_t offset = target_ - offset_location - branch_info_[type_].pc_org * sizeof(uint32_t);
  // Prepare the offset for encoding into the instruction(s).
  offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
  return offset;
}
2341 
// Returns the branch with the given ID (its index in branches_), checked.
Mips64Assembler::Branch* Mips64Assembler::GetBranch(uint32_t branch_id) {
  CHECK_LT(branch_id, branches_.size());
  return &branches_[branch_id];
}

// Const overload of the above.
const Mips64Assembler::Branch* Mips64Assembler::GetBranch(uint32_t branch_id) const {
  CHECK_LT(branch_id, branches_.size());
  return &branches_[branch_id];
}
2351 
// Binds `label` to the current end of the code buffer and resolves all
// branches that were linked to it while it was still unbound.
void Mips64Assembler::Bind(Mips64Label* label) {
  CHECK(!label->IsBound());
  uint32_t bound_pc = buffer_.Size();

  // Walk the list of branches referring to and preceding this label.
  // Store the previously unknown target addresses in them.
  while (label->IsLinked()) {
    uint32_t branch_id = label->Position();
    Branch* branch = GetBranch(branch_id);
    branch->Resolve(bound_pc);

    uint32_t branch_location = branch->GetLocation();
    // Extract the location of the previous branch in the list (walking the list backwards;
    // the previous branch ID was stored in the space reserved for this branch).
    uint32_t prev = buffer_.Load<uint32_t>(branch_location);

    // On to the previous branch in the list...
    label->position_ = prev;
  }

  // Now make the label object contain its own location (relative to the end of the preceding
  // branch, if any; it will be used by the branches referring to and following this label).
  label->prev_branch_id_plus_one_ = branches_.size();
  if (label->prev_branch_id_plus_one_) {
    uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
    const Branch* branch = GetBranch(branch_id);
    // Store the position relative to the end of the preceding branch so it
    // survives later branch expansion (see GetLabelLocation()).
    bound_pc -= branch->GetEndLocation();
  }
  label->BindTo(bound_pc);
}
2382 
GetLabelLocation(const Mips64Label * label) const2383 uint32_t Mips64Assembler::GetLabelLocation(const Mips64Label* label) const {
2384   CHECK(label->IsBound());
2385   uint32_t target = label->Position();
2386   if (label->prev_branch_id_plus_one_) {
2387     // Get label location based on the branch preceding it.
2388     uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
2389     const Branch* branch = GetBranch(branch_id);
2390     target += branch->GetEndLocation();
2391   }
2392   return target;
2393 }
2394 
// Translates a pre-promotion buffer position into its post-promotion
// position by summing the size growth of all branches located before it.
// Caches progress (last_*_ members) so ascending queries are O(m+n) total.
uint32_t Mips64Assembler::GetAdjustedPosition(uint32_t old_position) {
  // We can reconstruct the adjustment by going through all the branches from the beginning
  // up to the old_position. Since we expect AdjustedPosition() to be called in a loop
  // with increasing old_position, we can use the data from last AdjustedPosition() to
  // continue where we left off and the whole loop should be O(m+n) where m is the number
  // of positions to adjust and n is the number of branches.
  if (old_position < last_old_position_) {
    // Query went backwards; restart the scan from the first branch.
    last_position_adjustment_ = 0;
    last_old_position_ = 0;
    last_branch_id_ = 0;
  }
  while (last_branch_id_ != branches_.size()) {
    const Branch* branch = GetBranch(last_branch_id_);
    if (branch->GetLocation() >= old_position + last_position_adjustment_) {
      break;
    }
    last_position_adjustment_ += branch->GetSize() - branch->GetOldSize();
    ++last_branch_id_;
  }
  last_old_position_ = old_position;
  return old_position + last_position_adjustment_;
}
2417 
// Reserves buffer space for the branch just appended to branches_ and, if
// the label is not yet bound, links the branch into the label's list of
// forward references (the link is stored in the reserved space itself).
void Mips64Assembler::FinalizeLabeledBranch(Mips64Label* label) {
  uint32_t length = branches_.back().GetLength();
  if (!label->IsBound()) {
    // Branch forward (to a following label), distance is unknown.
    // The first branch forward will contain 0, serving as the terminator of
    // the list of forward-reaching branches.
    Emit(label->position_);
    length--;
    // Now make the label object point to this branch
    // (this forms a linked list of branches preceding this label).
    uint32_t branch_id = branches_.size() - 1;
    label->LinkTo(branch_id);
  }
  // Reserve space for the branch.
  while (length--) {
    Nop();
  }
}
2436 
// Pseudo-instruction: unconditional branch to `label` (resolved later if
// the label is not yet bound).
void Mips64Assembler::Buncond(Mips64Label* label) {
  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
  branches_.emplace_back(buffer_.Size(), target, /* is_call */ false);
  FinalizeLabeledBranch(label);
}

// Pseudo-instruction: conditional branch to `label` on `condition`
// applied to `lhs` and `rhs`.
void Mips64Assembler::Bcond(Mips64Label* label,
                            BranchCondition condition,
                            GpuRegister lhs,
                            GpuRegister rhs) {
  // If lhs = rhs, this can be a NOP.
  if (Branch::IsNop(condition, lhs, rhs)) {
    return;
  }
  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
  branches_.emplace_back(buffer_.Size(), target, condition, lhs, rhs);
  FinalizeLabeledBranch(label);
}

// Pseudo-instruction: branch-and-link (call) to `label`.
void Mips64Assembler::Call(Mips64Label* label) {
  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
  branches_.emplace_back(buffer_.Size(), target, /* is_call */ true);
  FinalizeLabeledBranch(label);
}
2461 
// Loads the address of `label` into `dest_reg`.
// Label address loads are treated as pseudo branches since they require very similar handling.
void Mips64Assembler::LoadLabelAddress(GpuRegister dest_reg, Mips64Label* label) {
  DCHECK(!label->IsBound());
  branches_.emplace_back(buffer_.Size(), dest_reg, Branch::kLabel);
  FinalizeLabeledBranch(label);
}
2468 
NewLiteral(size_t size,const uint8_t * data)2469 Literal* Mips64Assembler::NewLiteral(size_t size, const uint8_t* data) {
2470   // We don't support byte and half-word literals.
2471   if (size == 4u) {
2472     literals_.emplace_back(size, data);
2473     return &literals_.back();
2474   } else {
2475     DCHECK_EQ(size, 8u);
2476     long_literals_.emplace_back(size, data);
2477     return &long_literals_.back();
2478   }
2479 }
2480 
// Loads `literal` into `dest_reg` using the given load type.
// Literal loads are treated as pseudo branches since they require very similar handling.
void Mips64Assembler::LoadLiteral(GpuRegister dest_reg,
                                  LoadOperandType load_type,
                                  Literal* literal) {
  // Map the load type to the matching pseudo-branch type, checking that it
  // agrees with the literal's size.
  Branch::Type literal_type;
  switch (load_type) {
    case kLoadWord:
      DCHECK_EQ(literal->GetSize(), 4u);
      literal_type = Branch::kLiteral;
      break;
    case kLoadUnsignedWord:
      DCHECK_EQ(literal->GetSize(), 4u);
      literal_type = Branch::kLiteralUnsigned;
      break;
    case kLoadDoubleword:
      DCHECK_EQ(literal->GetSize(), 8u);
      literal_type = Branch::kLiteralLong;
      break;
    default:
      LOG(FATAL) << "Unexpected literal load type " << load_type;
      UNREACHABLE();
  }
  Mips64Label* label = literal->GetLabel();
  DCHECK(!label->IsBound());
  branches_.emplace_back(buffer_.Size(), dest_reg, literal_type);
  FinalizeLabeledBranch(label);
}
2508 
CreateJumpTable(std::vector<Mips64Label * > && labels)2509 JumpTable* Mips64Assembler::CreateJumpTable(std::vector<Mips64Label*>&& labels) {
2510   jump_tables_.emplace_back(std::move(labels));
2511   JumpTable* table = &jump_tables_.back();
2512   DCHECK(!table->GetLabel()->IsBound());
2513   return table;
2514 }
2515 
ReserveJumpTableSpace()2516 void Mips64Assembler::ReserveJumpTableSpace() {
2517   if (!jump_tables_.empty()) {
2518     for (JumpTable& table : jump_tables_) {
2519       Mips64Label* label = table.GetLabel();
2520       Bind(label);
2521 
2522       // Bulk ensure capacity, as this may be large.
2523       size_t orig_size = buffer_.Size();
2524       size_t required_capacity = orig_size + table.GetSize();
2525       if (required_capacity > buffer_.Capacity()) {
2526         buffer_.ExtendCapacity(required_capacity);
2527       }
2528 #ifndef NDEBUG
2529       buffer_.has_ensured_capacity_ = true;
2530 #endif
2531 
2532       // Fill the space with dummy data as the data is not final
2533       // until the branches have been promoted. And we shouldn't
2534       // be moving uninitialized data during branch promotion.
2535       for (size_t cnt = table.GetData().size(), i = 0; i < cnt; i++) {
2536         buffer_.Emit<uint32_t>(0x1abe1234u);
2537       }
2538 
2539 #ifndef NDEBUG
2540       buffer_.has_ensured_capacity_ = false;
2541 #endif
2542     }
2543   }
2544 }
2545 
// Overwrites the dummy words reserved by ReserveJumpTableSpace() with the
// final table entries (target addresses relative to each table's start).
void Mips64Assembler::EmitJumpTables() {
  if (!jump_tables_.empty()) {
    CHECK(!overwriting_);
    // Switch from appending instructions at the end of the buffer to overwriting
    // existing instructions (here, jump tables) in the buffer.
    overwriting_ = true;

    for (JumpTable& table : jump_tables_) {
      Mips64Label* table_label = table.GetLabel();
      uint32_t start = GetLabelLocation(table_label);
      overwrite_location_ = start;

      for (Mips64Label* target : table.GetData()) {
        // The slot must still hold the dummy marker written when the space
        // was reserved.
        CHECK_EQ(buffer_.Load<uint32_t>(overwrite_location_), 0x1abe1234u);
        // The table will contain target addresses relative to the table start.
        uint32_t offset = GetLabelLocation(target) - start;
        Emit(offset);
      }
    }

    overwriting_ = false;
  }
}
2569 
// Appends the 32-bit literal pool, then the 64-bit literal pool (preceded
// by one word reserved for potential 8-byte alignment) to the buffer,
// binding each literal's label at its position.
void Mips64Assembler::EmitLiterals() {
  if (!literals_.empty()) {
    for (Literal& literal : literals_) {
      Mips64Label* label = literal.GetLabel();
      Bind(label);
      AssemblerBuffer::EnsureCapacity ensured(&buffer_);
      DCHECK_EQ(literal.GetSize(), 4u);
      for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
        buffer_.Emit<uint8_t>(literal.GetData()[i]);
      }
    }
  }
  if (!long_literals_.empty()) {
    // Reserve 4 bytes for potential alignment. If after the branch promotion the 64-bit
    // literals don't end up 8-byte-aligned, they will be moved down 4 bytes.
    Emit(0);  // NOP.
    for (Literal& literal : long_literals_) {
      Mips64Label* label = literal.GetLabel();
      Bind(label);
      AssemblerBuffer::EnsureCapacity ensured(&buffer_);
      DCHECK_EQ(literal.GetSize(), 8u);
      for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
        buffer_.Emit<uint8_t>(literal.GetData()[i]);
      }
    }
  }
}
2597 
// Iteratively promotes short branches to long encodings until a fixed
// point is reached, then grows the buffer and moves the code between
// branch placeholders to its final location (back to front, so moves
// never overwrite not-yet-moved code). Finally re-aligns the 64-bit
// literal pool if promotion left it misaligned.
void Mips64Assembler::PromoteBranches() {
  // Promote short branches to long as necessary.
  bool changed;
  do {
    changed = false;
    for (auto& branch : branches_) {
      CHECK(branch.IsResolved());
      uint32_t delta = branch.PromoteIfNeeded();
      // If this branch has been promoted and needs to expand in size,
      // relocate all branches by the expansion size.
      if (delta) {
        changed = true;
        uint32_t expand_location = branch.GetLocation();
        for (auto& branch2 : branches_) {
          branch2.Relocate(expand_location, delta);
        }
      }
    }
  } while (changed);

  // Account for branch expansion by resizing the code buffer
  // and moving the code in it to its final location.
  size_t branch_count = branches_.size();
  if (branch_count > 0) {
    // Resize.
    Branch& last_branch = branches_[branch_count - 1];
    uint32_t size_delta = last_branch.GetEndLocation() - last_branch.GetOldEndLocation();
    uint32_t old_size = buffer_.Size();
    buffer_.Resize(old_size + size_delta);
    // Move the code residing between branch placeholders.
    uint32_t end = old_size;
    for (size_t i = branch_count; i > 0; ) {
      Branch& branch = branches_[--i];
      uint32_t size = end - branch.GetOldEndLocation();
      buffer_.Move(branch.GetEndLocation(), branch.GetOldEndLocation(), size);
      end = branch.GetOldLocation();
    }
  }

  // Align 64-bit literals by moving them down by 4 bytes if needed.
  // This will reduce the PC-relative distance, which should be safe for both near and far literals.
  if (!long_literals_.empty()) {
    uint32_t first_literal_location = GetLabelLocation(long_literals_.front().GetLabel());
    size_t lit_size = long_literals_.size() * sizeof(uint64_t);
    size_t buf_size = buffer_.Size();
    // 64-bit literals must be at the very end of the buffer.
    CHECK_EQ(first_literal_location + lit_size, buf_size);
    if (!IsAligned<sizeof(uint64_t)>(first_literal_location)) {
      // Use the word reserved by EmitLiterals(): shift the pool down 4 bytes.
      buffer_.Move(first_literal_location - sizeof(uint32_t), first_literal_location, lit_size);
      // The 4 reserved bytes proved useless, reduce the buffer size.
      buffer_.Resize(buf_size - sizeof(uint32_t));
      // Reduce target addresses in literal and address loads by 4 bytes in order for correct
      // offsets from PC to be generated.
      for (auto& branch : branches_) {
        uint32_t target = branch.GetTarget();
        if (target >= first_literal_location) {
          branch.Resolve(target - sizeof(uint32_t));
        }
      }
      // If after this we ever call GetLabelLocation() to get the location of a 64-bit literal,
      // we need to adjust the location of the literal's label as well.
      for (Literal& literal : long_literals_) {
        // Bound label's position is negative, hence incrementing it instead of decrementing.
        literal.GetLabel()->position_ += sizeof(uint32_t);
      }
    }
  }
}
2666 
2667 // Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
// Per-branch-type encoding parameters. Field order, as consumed by the
// Branch getters and GetOffset():
//   { length (in 32-bit instructions),
//     instr_offset (index of the instruction carrying the offset),
//     pc_org (PC-relative origin adjustment, in instructions),
//     offset_size (bits available for the offset),
//     offset_shift (low offset bits dropped from the encoding) }
const Mips64Assembler::Branch::BranchInfo Mips64Assembler::Branch::branch_info_[] = {
  // Short branches.
  {  1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 },  // kUncondBranch
  {  2, 0, 1, Mips64Assembler::Branch::kOffset18, 2 },  // kCondBranch
                                                        // Exception: kOffset23 for beqzc/bnezc
  {  1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 },  // kCall
  // Near label.
  {  1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 },  // kLabel
  // Near literals.
  {  1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 },  // kLiteral
  {  1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 },  // kLiteralUnsigned
  {  1, 0, 0, Mips64Assembler::Branch::kOffset21, 3 },  // kLiteralLong
  // Long branches.
  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongUncondBranch
  {  3, 1, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongCondBranch
  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongCall
  // Far label.
  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kFarLabel
  // Far literals.
  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kFarLiteral
  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kFarLiteralUnsigned
  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kFarLiteralLong
};
2691 
2692 // Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
// Overwrites the placeholder reserved for `branch` with its final machine
// instructions. Must be kept in sync with branch_info_[] (lengths, offset
// instruction positions and offset widths).
void Mips64Assembler::EmitBranch(Mips64Assembler::Branch* branch) {
  CHECK(overwriting_);
  overwrite_location_ = branch->GetLocation();
  uint32_t offset = branch->GetOffset();
  BranchCondition condition = branch->GetCondition();
  GpuRegister lhs = branch->GetLeftRegister();
  GpuRegister rhs = branch->GetRightRegister();
  switch (branch->GetType()) {
    // Short branches.
    case Branch::kUncondBranch:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Bc(offset);
      break;
    case Branch::kCondBranch:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      EmitBcondc(condition, lhs, rhs, offset);
      Nop();  // TODO: improve by filling the forbidden/delay slot.
      break;
    case Branch::kCall:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Balc(offset);
      break;

    // Near label.
    case Branch::kLabel:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Addiupc(lhs, offset);
      break;
    // Near literals.
    case Branch::kLiteral:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Lwpc(lhs, offset);
      break;
    case Branch::kLiteralUnsigned:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Lwupc(lhs, offset);
      break;
    case Branch::kLiteralLong:
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Ldpc(lhs, offset);
      break;

    // Long branches. Each is an auipc (upper 16 bits) followed by an
    // instruction consuming the (sign-extended) lower 16 bits, hence the
    // pre-compensation added to `offset` below.
    case Branch::kLongUncondBranch:
      offset += (offset & 0x8000) << 1;  // Account for sign extension in jic.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Jic(AT, Low16Bits(offset));
      break;
    case Branch::kLongCondBranch:
      // Skip over the auipc/jic pair when the (negated) condition holds.
      EmitBcondc(Branch::OppositeCondition(condition), lhs, rhs, 2);
      offset += (offset & 0x8000) << 1;  // Account for sign extension in jic.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Jic(AT, Low16Bits(offset));
      break;
    case Branch::kLongCall:
      offset += (offset & 0x8000) << 1;  // Account for sign extension in jialc.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Jialc(AT, Low16Bits(offset));
      break;

    // Far label.
    case Branch::kFarLabel:
      offset += (offset & 0x8000) << 1;  // Account for sign extension in daddiu.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Daddiu(lhs, AT, Low16Bits(offset));
      break;
    // Far literals.
    case Branch::kFarLiteral:
      offset += (offset & 0x8000) << 1;  // Account for sign extension in lw.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Lw(lhs, AT, Low16Bits(offset));
      break;
    case Branch::kFarLiteralUnsigned:
      offset += (offset & 0x8000) << 1;  // Account for sign extension in lwu.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Lwu(lhs, AT, Low16Bits(offset));
      break;
    case Branch::kFarLiteralLong:
      offset += (offset & 0x8000) << 1;  // Account for sign extension in ld.
      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
      Auipc(AT, High16Bits(offset));
      Ld(lhs, AT, Low16Bits(offset));
      break;
  }
  CHECK_EQ(overwrite_location_, branch->GetEndLocation());
  CHECK_LT(branch->GetSize(), static_cast<uint32_t>(Branch::kMaxBranchSize));
}
2786 
// Public branch mnemonics. Each forwards to the generic pseudo-branch
// machinery (Buncond/Call/Bcond) with the matching condition; the real
// instruction is selected later by EmitBranch().
void Mips64Assembler::Bc(Mips64Label* label) {
  Buncond(label);
}

void Mips64Assembler::Balc(Mips64Label* label) {
  Call(label);
}

void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondLT, rs, rt);
}

void Mips64Assembler::Bltzc(GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondLTZ, rt);
}

void Mips64Assembler::Bgtzc(GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondGTZ, rt);
}

void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondGE, rs, rt);
}

void Mips64Assembler::Bgezc(GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondGEZ, rt);
}

void Mips64Assembler::Blezc(GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondLEZ, rt);
}

void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondLTU, rs, rt);
}

void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondGEU, rs, rt);
}

void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondEQ, rs, rt);
}

void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
  Bcond(label, kCondNE, rs, rt);
}

void Mips64Assembler::Beqzc(GpuRegister rs, Mips64Label* label) {
  Bcond(label, kCondEQZ, rs);
}

void Mips64Assembler::Bnezc(GpuRegister rs, Mips64Label* label) {
  Bcond(label, kCondNEZ, rs);
}

// FPU condition branches: the FPU register is passed through the GPU
// register slot of the generic Bcond.
void Mips64Assembler::Bc1eqz(FpuRegister ft, Mips64Label* label) {
  Bcond(label, kCondF, static_cast<GpuRegister>(ft), ZERO);
}

void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label) {
  Bcond(label, kCondT, static_cast<GpuRegister>(ft), ZERO);
}
2850 
void Mips64Assembler::AdjustBaseAndOffset(GpuRegister& base,
                                          int32_t& offset,
                                          bool is_doubleword) {
  // This method is used to adjust the base register and offset pair
  // for a load/store when the offset doesn't fit into int16_t.
  // It is assumed that `base + offset` is sufficiently aligned for memory
  // operands that are machine word in size or smaller. For doubleword-sized
  // operands it's assumed that `base` is a multiple of 8, while `offset`
  // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
  // and spilled variables on the stack accessed relative to the stack
  // pointer register).
  // We preserve the "alignment" of `offset` by adjusting it by a multiple of 8.
  // On return, `base`/`offset` are updated in place (base may become AT).
  CHECK_NE(base, AT);  // Must not overwrite the register `base` while loading `offset`.

  bool doubleword_aligned = IsAligned<kMips64DoublewordSize>(offset);
  // A misaligned doubleword is accessed as two word loads/stores, so the
  // second access at `offset + 4` must fit into int16_t as well.
  bool two_accesses = is_doubleword && !doubleword_aligned;

  // IsInt<16> must be passed a signed value, hence the static cast below.
  if (IsInt<16>(offset) &&
      (!two_accesses || IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
    // Nothing to do: `offset` (and, if needed, `offset + 4`) fits into int16_t.
    return;
  }

  // Remember the "(mis)alignment" of `offset`, it will be checked at the end.
  uint32_t misalignment = offset & (kMips64DoublewordSize - 1);

  // First, see if `offset` can be represented as a sum of two 16-bit signed
  // offsets. This can save an instruction.
  // To simplify matters, only do this for a symmetric range of offsets from
  // about -64KB to about +64KB, allowing further addition of 4 when accessing
  // 64-bit variables with two 32-bit accesses.
  constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7ff8;  // Max int16_t that's a multiple of 8.
  constexpr int32_t kMaxOffsetForSimpleAdjustment = 2 * kMinOffsetForSimpleAdjustment;

  if (0 <= offset && offset <= kMaxOffsetForSimpleAdjustment) {
    Daddiu(AT, base, kMinOffsetForSimpleAdjustment);
    offset -= kMinOffsetForSimpleAdjustment;
  } else if (-kMaxOffsetForSimpleAdjustment <= offset && offset < 0) {
    Daddiu(AT, base, -kMinOffsetForSimpleAdjustment);
    offset += kMinOffsetForSimpleAdjustment;
  } else {
    // In more complex cases take advantage of the daui instruction, e.g.:
    //    daui   AT, base, offset_high
    //   [dahi   AT, 1]                       // When `offset` is close to +2GB.
    //    lw     reg_lo, offset_low(AT)
    //   [lw     reg_hi, (offset_low+4)(AT)]  // If misaligned 64-bit load.
    // or when offset_low+4 overflows int16_t:
    //    daui   AT, base, offset_high
    //    daddiu AT, AT, 8
    //    lw     reg_lo, (offset_low-8)(AT)
    //    lw     reg_hi, (offset_low-4)(AT)
    int16_t offset_low = Low16Bits(offset);
    int32_t offset_low32 = offset_low;
    int16_t offset_high = High16Bits(offset);
    // A negative offset_low gets sign-extended by the load/store, so the
    // upper half must be bumped by one to compensate.
    bool increment_hi16 = offset_low < 0;
    bool overflow_hi16 = false;

    if (increment_hi16) {
      offset_high++;
      overflow_hi16 = (offset_high == -32768);
    }
    Daui(AT, base, offset_high);

    if (overflow_hi16) {
      Dahi(AT, 1);
    }

    if (two_accesses && !IsInt<16>(static_cast<int32_t>(offset_low32 + kMips64WordSize))) {
      // Avoid overflow in the 16-bit offset of the load/store instruction when adding 4.
      Daddiu(AT, AT, kMips64DoublewordSize);
      offset_low32 -= kMips64DoublewordSize;
    }

    offset = offset_low32;
  }
  base = AT;

  CHECK(IsInt<16>(offset));
  if (two_accesses) {
    CHECK(IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)));
  }
  CHECK_EQ(misalignment, offset & (kMips64DoublewordSize - 1));
}
2935 
void Mips64Assembler::AdjustBaseOffsetAndElementSizeShift(GpuRegister& base,
                                                          int32_t& offset,
                                                          int& element_size_shift) {
  // This method is used to adjust the base register, offset and element_size_shift
  // for a vector load/store when the offset doesn't fit into allowed number of bits.
  // MSA ld.df and st.df instructions take signed offsets as arguments, but maximum
  // offset is dependant on the size of the data format df (10-bit offsets for ld.b,
  // 11-bit for ld.h, 12-bit for ld.w and 13-bit for ld.d).
  // If element_size_shift is non-negative at entry, it won't be changed, but offset
  // will be checked for appropriate alignment. If negative at entry, it will be
  // adjusted based on offset for maximum fit.
  // It's assumed that `base` is a multiple of 8.
  // On exit, if any adjustment was emitted, `base` is rewritten to AT and
  // `offset` is the residual that fits the ld.df/st.df immediate.

  CHECK_NE(base, AT);  // Must not overwrite the register `base` while loading `offset`.

  if (element_size_shift >= 0) {
    // Caller fixed the element size: only validate that `offset` is aligned to it.
    CHECK_LE(element_size_shift, TIMES_8);
    CHECK_GE(JAVASTYLE_CTZ(offset), element_size_shift);
  } else if (IsAligned<kMips64DoublewordSize>(offset)) {
    element_size_shift = TIMES_8;
  } else if (IsAligned<kMips64WordSize>(offset)) {
    element_size_shift = TIMES_4;
  } else if (IsAligned<kMips64HalfwordSize>(offset)) {
    element_size_shift = TIMES_2;
  } else {
    element_size_shift = TIMES_1;
  }

  const int low_len = 10 + element_size_shift;  // How many low bits of `offset` ld.df/st.df
                                                // will take.
  int16_t low = offset & ((1 << low_len) - 1);  // Isolate these bits.
  low -= (low & (1 << (low_len - 1))) << 1;     // Sign-extend these bits.
  if (low == offset) {
    return;  // `offset` fits into ld.df/st.df.
  }

  // First, see if `offset` can be represented as a sum of two signed offsets.
  // This can save an instruction.

  // Max int16_t that's a multiple of element size.
  const int32_t kMaxDeltaForSimpleAdjustment = 0x8000 - (1 << element_size_shift);
  // Max ld.df/st.df offset that's a multiple of element size.
  const int32_t kMaxLoadStoreOffset = 0x1ff << element_size_shift;
  const int32_t kMaxOffsetForSimpleAdjustment = kMaxDeltaForSimpleAdjustment + kMaxLoadStoreOffset;

  if (IsInt<16>(offset)) {
    // One daddiu absorbs the whole offset.
    Daddiu(AT, base, offset);
    offset = 0;
  } else if (0 <= offset && offset <= kMaxOffsetForSimpleAdjustment) {
    Daddiu(AT, base, kMaxDeltaForSimpleAdjustment);
    offset -= kMaxDeltaForSimpleAdjustment;
  } else if (-kMaxOffsetForSimpleAdjustment <= offset && offset < 0) {
    Daddiu(AT, base, -kMaxDeltaForSimpleAdjustment);
    offset += kMaxDeltaForSimpleAdjustment;
  } else {
    // Let's treat `offset` as 64-bit to simplify handling of sign
    // extensions in the instructions that supply its smaller signed parts.
    //
    // 16-bit or smaller parts of `offset`:
    // |63  top  48|47  hi  32|31  upper  16|15  mid  13-10|12-9  low  0|
    //
    // Instructions that supply each part as a signed integer addend:
    // |dati       |dahi      |daui         |daddiu        |ld.df/st.df |
    //
    // `top` is always 0, so dati isn't used.
    // `hi` is 1 when `offset` is close to +2GB and 0 otherwise.
    uint64_t tmp = static_cast<uint64_t>(offset) - low;  // Exclude `low` from the rest of `offset`
                                                         // (accounts for sign of `low`).
    tmp += (tmp & (UINT64_C(1) << 15)) << 1;  // Account for sign extension in daddiu.
    tmp += (tmp & (UINT64_C(1) << 31)) << 1;  // Account for sign extension in daui.
    int16_t mid = Low16Bits(tmp);
    int16_t upper = High16Bits(tmp);
    int16_t hi = Low16Bits(High32Bits(tmp));
    Daui(AT, base, upper);
    if (hi != 0) {
      CHECK_EQ(hi, 1);
      Dahi(AT, hi);
    }
    if (mid != 0) {
      Daddiu(AT, AT, mid);
    }
    offset = low;
  }
  base = AT;
  // Postconditions: `offset` remains aligned to the element size and fits
  // the signed, scaled 10-bit ld.df/st.df immediate.
  CHECK_GE(JAVASTYLE_CTZ(offset), element_size_shift);
  CHECK(IsInt<10>(offset >> element_size_shift));
}
3023 
LoadFromOffset(LoadOperandType type,GpuRegister reg,GpuRegister base,int32_t offset)3024 void Mips64Assembler::LoadFromOffset(LoadOperandType type,
3025                                      GpuRegister reg,
3026                                      GpuRegister base,
3027                                      int32_t offset) {
3028   LoadFromOffset<>(type, reg, base, offset);
3029 }
3030 
LoadFpuFromOffset(LoadOperandType type,FpuRegister reg,GpuRegister base,int32_t offset)3031 void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type,
3032                                         FpuRegister reg,
3033                                         GpuRegister base,
3034                                         int32_t offset) {
3035   LoadFpuFromOffset<>(type, reg, base, offset);
3036 }
3037 
EmitLoad(ManagedRegister m_dst,GpuRegister src_register,int32_t src_offset,size_t size)3038 void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
3039                                size_t size) {
3040   Mips64ManagedRegister dst = m_dst.AsMips64();
3041   if (dst.IsNoRegister()) {
3042     CHECK_EQ(0u, size) << dst;
3043   } else if (dst.IsGpuRegister()) {
3044     if (size == 4) {
3045       LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
3046     } else if (size == 8) {
3047       CHECK_EQ(8u, size) << dst;
3048       LoadFromOffset(kLoadDoubleword, dst.AsGpuRegister(), src_register, src_offset);
3049     } else {
3050       UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
3051     }
3052   } else if (dst.IsFpuRegister()) {
3053     if (size == 4) {
3054       CHECK_EQ(4u, size) << dst;
3055       LoadFpuFromOffset(kLoadWord, dst.AsFpuRegister(), src_register, src_offset);
3056     } else if (size == 8) {
3057       CHECK_EQ(8u, size) << dst;
3058       LoadFpuFromOffset(kLoadDoubleword, dst.AsFpuRegister(), src_register, src_offset);
3059     } else {
3060       UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
3061     }
3062   }
3063 }
3064 
StoreToOffset(StoreOperandType type,GpuRegister reg,GpuRegister base,int32_t offset)3065 void Mips64Assembler::StoreToOffset(StoreOperandType type,
3066                                     GpuRegister reg,
3067                                     GpuRegister base,
3068                                     int32_t offset) {
3069   StoreToOffset<>(type, reg, base, offset);
3070 }
3071 
StoreFpuToOffset(StoreOperandType type,FpuRegister reg,GpuRegister base,int32_t offset)3072 void Mips64Assembler::StoreFpuToOffset(StoreOperandType type,
3073                                        FpuRegister reg,
3074                                        GpuRegister base,
3075                                        int32_t offset) {
3076   StoreFpuToOffset<>(type, reg, base, offset);
3077 }
3078 
// Maps a MIPS64 core (GPU) register to its DWARF register number for CFI.
static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}
3082 
// Size of one stack slot used for RA/callee-save spills (64-bit registers).
constexpr size_t kFramePointerSize = 8;
3084 
BuildFrame(size_t frame_size,ManagedRegister method_reg,ArrayRef<const ManagedRegister> callee_save_regs,const ManagedRegisterEntrySpills & entry_spills)3085 void Mips64Assembler::BuildFrame(size_t frame_size,
3086                                  ManagedRegister method_reg,
3087                                  ArrayRef<const ManagedRegister> callee_save_regs,
3088                                  const ManagedRegisterEntrySpills& entry_spills) {
3089   CHECK_ALIGNED(frame_size, kStackAlignment);
3090   DCHECK(!overwriting_);
3091 
3092   // Increase frame to required size.
3093   IncreaseFrameSize(frame_size);
3094 
3095   // Push callee saves and return address
3096   int stack_offset = frame_size - kFramePointerSize;
3097   StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
3098   cfi_.RelOffset(DWARFReg(RA), stack_offset);
3099   for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
3100     stack_offset -= kFramePointerSize;
3101     GpuRegister reg = callee_save_regs[i].AsMips64().AsGpuRegister();
3102     StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
3103     cfi_.RelOffset(DWARFReg(reg), stack_offset);
3104   }
3105 
3106   // Write out Method*.
3107   StoreToOffset(kStoreDoubleword, method_reg.AsMips64().AsGpuRegister(), SP, 0);
3108 
3109   // Write out entry spills.
3110   int32_t offset = frame_size + kFramePointerSize;
3111   for (size_t i = 0; i < entry_spills.size(); ++i) {
3112     Mips64ManagedRegister reg = entry_spills[i].AsMips64();
3113     ManagedRegisterSpill spill = entry_spills.at(i);
3114     int32_t size = spill.getSize();
3115     if (reg.IsNoRegister()) {
3116       // only increment stack offset.
3117       offset += size;
3118     } else if (reg.IsFpuRegister()) {
3119       StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
3120           reg.AsFpuRegister(), SP, offset);
3121       offset += size;
3122     } else if (reg.IsGpuRegister()) {
3123       StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
3124           reg.AsGpuRegister(), SP, offset);
3125       offset += size;
3126     }
3127   }
3128 }
3129 
RemoveFrame(size_t frame_size,ArrayRef<const ManagedRegister> callee_save_regs)3130 void Mips64Assembler::RemoveFrame(size_t frame_size,
3131                                   ArrayRef<const ManagedRegister> callee_save_regs) {
3132   CHECK_ALIGNED(frame_size, kStackAlignment);
3133   DCHECK(!overwriting_);
3134   cfi_.RememberState();
3135 
3136   // Pop callee saves and return address
3137   int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
3138   for (size_t i = 0; i < callee_save_regs.size(); ++i) {
3139     GpuRegister reg = callee_save_regs[i].AsMips64().AsGpuRegister();
3140     LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
3141     cfi_.Restore(DWARFReg(reg));
3142     stack_offset += kFramePointerSize;
3143   }
3144   LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);
3145   cfi_.Restore(DWARFReg(RA));
3146 
3147   // Decrease frame to required size.
3148   DecreaseFrameSize(frame_size);
3149 
3150   // Then jump to the return address.
3151   Jr(RA);
3152   Nop();
3153 
3154   // The CFI should be restored for any code that follows the exit block.
3155   cfi_.RestoreState();
3156   cfi_.DefCFAOffset(frame_size);
3157 }
3158 
IncreaseFrameSize(size_t adjust)3159 void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
3160   CHECK_ALIGNED(adjust, kFramePointerSize);
3161   DCHECK(!overwriting_);
3162   Daddiu64(SP, SP, static_cast<int32_t>(-adjust));
3163   cfi_.AdjustCFAOffset(adjust);
3164 }
3165 
DecreaseFrameSize(size_t adjust)3166 void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
3167   CHECK_ALIGNED(adjust, kFramePointerSize);
3168   DCHECK(!overwriting_);
3169   Daddiu64(SP, SP, static_cast<int32_t>(adjust));
3170   cfi_.AdjustCFAOffset(-adjust);
3171 }
3172 
Store(FrameOffset dest,ManagedRegister msrc,size_t size)3173 void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
3174   Mips64ManagedRegister src = msrc.AsMips64();
3175   if (src.IsNoRegister()) {
3176     CHECK_EQ(0u, size);
3177   } else if (src.IsGpuRegister()) {
3178     CHECK(size == 4 || size == 8) << size;
3179     if (size == 8) {
3180       StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
3181     } else if (size == 4) {
3182       StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
3183     } else {
3184       UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
3185     }
3186   } else if (src.IsFpuRegister()) {
3187     CHECK(size == 4 || size == 8) << size;
3188     if (size == 8) {
3189       StoreFpuToOffset(kStoreDoubleword, src.AsFpuRegister(), SP, dest.Int32Value());
3190     } else if (size == 4) {
3191       StoreFpuToOffset(kStoreWord, src.AsFpuRegister(), SP, dest.Int32Value());
3192     } else {
3193       UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
3194     }
3195   }
3196 }
3197 
StoreRef(FrameOffset dest,ManagedRegister msrc)3198 void Mips64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
3199   Mips64ManagedRegister src = msrc.AsMips64();
3200   CHECK(src.IsGpuRegister());
3201   StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
3202 }
3203 
StoreRawPtr(FrameOffset dest,ManagedRegister msrc)3204 void Mips64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
3205   Mips64ManagedRegister src = msrc.AsMips64();
3206   CHECK(src.IsGpuRegister());
3207   StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
3208 }
3209 
StoreImmediateToFrame(FrameOffset dest,uint32_t imm,ManagedRegister mscratch)3210 void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
3211                                             ManagedRegister mscratch) {
3212   Mips64ManagedRegister scratch = mscratch.AsMips64();
3213   CHECK(scratch.IsGpuRegister()) << scratch;
3214   LoadConst32(scratch.AsGpuRegister(), imm);
3215   StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
3216 }
3217 
StoreStackOffsetToThread(ThreadOffset64 thr_offs,FrameOffset fr_offs,ManagedRegister mscratch)3218 void Mips64Assembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
3219                                                FrameOffset fr_offs,
3220                                                ManagedRegister mscratch) {
3221   Mips64ManagedRegister scratch = mscratch.AsMips64();
3222   CHECK(scratch.IsGpuRegister()) << scratch;
3223   Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
3224   StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
3225 }
3226 
// Publishes the current SP into the thread object (held in S1) at `thr_offs`.
void Mips64Assembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
  StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}
3230 
StoreSpanning(FrameOffset dest,ManagedRegister msrc,FrameOffset in_off,ManagedRegister mscratch)3231 void Mips64Assembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
3232                                     FrameOffset in_off, ManagedRegister mscratch) {
3233   Mips64ManagedRegister src = msrc.AsMips64();
3234   Mips64ManagedRegister scratch = mscratch.AsMips64();
3235   StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
3236   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());
3237   StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value() + 8);
3238 }
3239 
Load(ManagedRegister mdest,FrameOffset src,size_t size)3240 void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
3241   return EmitLoad(mdest, SP, src.Int32Value(), size);
3242 }
3243 
LoadFromThread(ManagedRegister mdest,ThreadOffset64 src,size_t size)3244 void Mips64Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
3245   return EmitLoad(mdest, S1, src.Int32Value(), size);
3246 }
3247 
LoadRef(ManagedRegister mdest,FrameOffset src)3248 void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
3249   Mips64ManagedRegister dest = mdest.AsMips64();
3250   CHECK(dest.IsGpuRegister());
3251   LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value());
3252 }
3253 
LoadRef(ManagedRegister mdest,ManagedRegister base,MemberOffset offs,bool unpoison_reference)3254 void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
3255                               bool unpoison_reference) {
3256   Mips64ManagedRegister dest = mdest.AsMips64();
3257   CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
3258   LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
3259                  base.AsMips64().AsGpuRegister(), offs.Int32Value());
3260   if (unpoison_reference) {
3261     MaybeUnpoisonHeapReference(dest.AsGpuRegister());
3262   }
3263 }
3264 
LoadRawPtr(ManagedRegister mdest,ManagedRegister base,Offset offs)3265 void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
3266                                  Offset offs) {
3267   Mips64ManagedRegister dest = mdest.AsMips64();
3268   CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
3269   LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
3270                  base.AsMips64().AsGpuRegister(), offs.Int32Value());
3271 }
3272 
LoadRawPtrFromThread(ManagedRegister mdest,ThreadOffset64 offs)3273 void Mips64Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
3274   Mips64ManagedRegister dest = mdest.AsMips64();
3275   CHECK(dest.IsGpuRegister());
3276   LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
3277 }
3278 
// Intentionally unsupported: per the message below, MIPS64 needs no separate
// sign-extension step, so reaching this is a caller bug.
void Mips64Assembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
                                 size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No sign extension necessary for MIPS64";
}
3283 
// Intentionally unsupported: per the message below, MIPS64 needs no separate
// zero-extension step, so reaching this is a caller bug.
void Mips64Assembler::ZeroExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
                                 size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No zero extension necessary for MIPS64";
}
3288 
Move(ManagedRegister mdest,ManagedRegister msrc,size_t size)3289 void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
3290   Mips64ManagedRegister dest = mdest.AsMips64();
3291   Mips64ManagedRegister src = msrc.AsMips64();
3292   if (!dest.Equals(src)) {
3293     if (dest.IsGpuRegister()) {
3294       CHECK(src.IsGpuRegister()) << src;
3295       Move(dest.AsGpuRegister(), src.AsGpuRegister());
3296     } else if (dest.IsFpuRegister()) {
3297       CHECK(src.IsFpuRegister()) << src;
3298       if (size == 4) {
3299         MovS(dest.AsFpuRegister(), src.AsFpuRegister());
3300       } else if (size == 8) {
3301         MovD(dest.AsFpuRegister(), src.AsFpuRegister());
3302       } else {
3303         UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
3304       }
3305     }
3306   }
3307 }
3308 
CopyRef(FrameOffset dest,FrameOffset src,ManagedRegister mscratch)3309 void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
3310                               ManagedRegister mscratch) {
3311   Mips64ManagedRegister scratch = mscratch.AsMips64();
3312   CHECK(scratch.IsGpuRegister()) << scratch;
3313   LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
3314   StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
3315 }
3316 
CopyRawPtrFromThread(FrameOffset fr_offs,ThreadOffset64 thr_offs,ManagedRegister mscratch)3317 void Mips64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
3318                                            ThreadOffset64 thr_offs,
3319                                            ManagedRegister mscratch) {
3320   Mips64ManagedRegister scratch = mscratch.AsMips64();
3321   CHECK(scratch.IsGpuRegister()) << scratch;
3322   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
3323   StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
3324 }
3325 
CopyRawPtrToThread(ThreadOffset64 thr_offs,FrameOffset fr_offs,ManagedRegister mscratch)3326 void Mips64Assembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
3327                                          FrameOffset fr_offs,
3328                                          ManagedRegister mscratch) {
3329   Mips64ManagedRegister scratch = mscratch.AsMips64();
3330   CHECK(scratch.IsGpuRegister()) << scratch;
3331   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
3332                  SP, fr_offs.Int32Value());
3333   StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(),
3334                 S1, thr_offs.Int32Value());
3335 }
3336 
// Copies `size` bytes between two frame slots via the scratch register.
void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
    // NOTE(review): a 4-byte copy stores a full doubleword, writing the
    // sign-extension of the loaded word into the upper half of the 8-byte
    // destination slot. Presumably intentional (stack slots are 8-byte) —
    // confirm; otherwise this should be kStoreWord.
    StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else {
    // Unreachable after the CHECK above; kept as defense in depth.
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}
3352 
// Copies `size` bytes from `src_base` + `src_offset` into the frame slot at
// `dest`, via the scratch register.
void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    // NOTE(review): 4-byte copy stores a doubleword (sign-extension fills the
    // upper half of the destination slot) — confirm intent; see Copy(FrameOffset,
    // FrameOffset, ...) which has the same pattern.
    StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
  } else {
    // Unreachable after the CHECK above; kept as defense in depth.
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}
3369 
// Copies `size` bytes from the frame slot at `src` to `dest_base` +
// `dest_offset`, via the scratch register.
void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
    // NOTE(review): 4-byte copy stores a doubleword (sign-extension fills the
    // upper half of the destination) — confirm intent; same pattern as the
    // other Copy() overloads.
    StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    // Unreachable after the CHECK above; kept as defense in depth.
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}
3386 
// Frame-to-(frame-indirect) copy: not needed on MIPS64; aborts if called.
void Mips64Assembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
                           FrameOffset src_base ATTRIBUTE_UNUSED,
                           Offset src_offset ATTRIBUTE_UNUSED,
                           ManagedRegister mscratch ATTRIBUTE_UNUSED,
                           size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
3394 
// Copies `size` bytes from `src` + `src_offset` to `dest` + `dest_offset`,
// via the scratch register.
void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
                           ManagedRegister src, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src.AsMips64().AsGpuRegister(), src_offset.Int32Value());
    // NOTE(review): 4-byte copy stores a doubleword (sign-extension fills the
    // upper half of the destination) — confirm intent; same pattern as the
    // other Copy() overloads.
    StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(), dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    // Unreachable after the CHECK above; kept as defense in depth.
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}
3412 
// (Frame-indirect)-to-(frame-indirect) copy: not needed on MIPS64; aborts if called.
void Mips64Assembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
                           Offset dest_offset ATTRIBUTE_UNUSED,
                           FrameOffset src ATTRIBUTE_UNUSED,
                           Offset src_offset ATTRIBUTE_UNUSED,
                           ManagedRegister mscratch ATTRIBUTE_UNUSED,
                           size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
3421 
// Memory barrier: not implemented for MIPS64; aborts if called.
void Mips64Assembler::MemoryBarrier(ManagedRegister mreg ATTRIBUTE_UNUSED) {
  // TODO: sync?
  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
3426 
// Computes a handle scope entry address into `mout_reg`.
// With null_allowed: out = (entry == null) ? null : SP + handle_scope_offset;
// otherwise out = SP + handle_scope_offset unconditionally.
// `min_reg` may be NoRegister, in which case the reference is first loaded
// from the handle scope slot into `mout_reg`.
void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister min_reg,
                                             bool null_allowed) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  if (null_allowed) {
    Mips64Label null_arg;
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      // No input register: load the reference from the slot and test that.
      LoadFromOffset(kLoadUnsignedWord, out_reg.AsGpuRegister(),
                     SP, handle_scope_offset.Int32Value());
      in_reg = out_reg;  // From here on, out_reg aliases the input value.
    }
    if (!out_reg.Equals(in_reg)) {
      // Pre-set the null result; skipped below only via the branch.
      LoadConst32(out_reg.AsGpuRegister(), 0);
    }
    Beqzc(in_reg.AsGpuRegister(), &null_arg);
    Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg);
  } else {
    Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
}
3455 
CreateHandleScopeEntry(FrameOffset out_off,FrameOffset handle_scope_offset,ManagedRegister mscratch,bool null_allowed)3456 void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
3457                                              FrameOffset handle_scope_offset,
3458                                              ManagedRegister mscratch,
3459                                              bool null_allowed) {
3460   Mips64ManagedRegister scratch = mscratch.AsMips64();
3461   CHECK(scratch.IsGpuRegister()) << scratch;
3462   if (null_allowed) {
3463     Mips64Label null_arg;
3464     LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(), SP,
3465                    handle_scope_offset.Int32Value());
3466     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
3467     // the address in the handle scope holding the reference.
3468     // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
3469     Beqzc(scratch.AsGpuRegister(), &null_arg);
3470     Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
3471     Bind(&null_arg);
3472   } else {
3473     Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
3474   }
3475   StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
3476 }
3477 
3478 // Given a handle scope entry, load the associated reference.
LoadReferenceFromHandleScope(ManagedRegister mout_reg,ManagedRegister min_reg)3479 void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
3480                                                    ManagedRegister min_reg) {
3481   Mips64ManagedRegister out_reg = mout_reg.AsMips64();
3482   Mips64ManagedRegister in_reg = min_reg.AsMips64();
3483   CHECK(out_reg.IsGpuRegister()) << out_reg;
3484   CHECK(in_reg.IsGpuRegister()) << in_reg;
3485   Mips64Label null_arg;
3486   if (!out_reg.Equals(in_reg)) {
3487     LoadConst32(out_reg.AsGpuRegister(), 0);
3488   }
3489   Beqzc(in_reg.AsGpuRegister(), &null_arg);
3490   LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
3491                  in_reg.AsGpuRegister(), 0);
3492   Bind(&null_arg);
3493 }
3494 
// Reference verification is intentionally a no-op on MIPS64.
void Mips64Assembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
                                   bool could_be_null ATTRIBUTE_UNUSED) {
  // TODO: not validating references
}
3499 
// Reference verification is intentionally a no-op on MIPS64.
void Mips64Assembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
                                   bool could_be_null ATTRIBUTE_UNUSED) {
  // TODO: not validating references
}
3504 
Call(ManagedRegister mbase,Offset offset,ManagedRegister mscratch)3505 void Mips64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
3506   Mips64ManagedRegister base = mbase.AsMips64();
3507   Mips64ManagedRegister scratch = mscratch.AsMips64();
3508   CHECK(base.IsGpuRegister()) << base;
3509   CHECK(scratch.IsGpuRegister()) << scratch;
3510   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
3511                  base.AsGpuRegister(), offset.Int32Value());
3512   Jalr(scratch.AsGpuRegister());
3513   Nop();
3514   // TODO: place reference map on call
3515 }
3516 
Call(FrameOffset base,Offset offset,ManagedRegister mscratch)3517 void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
3518   Mips64ManagedRegister scratch = mscratch.AsMips64();
3519   CHECK(scratch.IsGpuRegister()) << scratch;
3520   // Call *(*(SP + base) + offset)
3521   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
3522                  SP, base.Int32Value());
3523   LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
3524                  scratch.AsGpuRegister(), offset.Int32Value());
3525   Jalr(scratch.AsGpuRegister());
3526   Nop();
3527   // TODO: place reference map on call
3528 }
3529 
// Calling through a thread-entrypoint offset is not implemented on MIPS64;
// aborts if called.
void Mips64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
                                     ManagedRegister mscratch ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
3534 
GetCurrentThread(ManagedRegister tr)3535 void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
3536   Move(tr.AsMips64().AsGpuRegister(), S1);
3537 }
3538 
GetCurrentThread(FrameOffset offset,ManagedRegister mscratch ATTRIBUTE_UNUSED)3539 void Mips64Assembler::GetCurrentThread(FrameOffset offset,
3540                                        ManagedRegister mscratch ATTRIBUTE_UNUSED) {
3541   StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
3542 }
3543 
ExceptionPoll(ManagedRegister mscratch,size_t stack_adjust)3544 void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
3545   Mips64ManagedRegister scratch = mscratch.AsMips64();
3546   exception_blocks_.emplace_back(scratch, stack_adjust);
3547   LoadFromOffset(kLoadDoubleword,
3548                  scratch.AsGpuRegister(),
3549                  S1,
3550                  Thread::ExceptionOffset<kMips64PointerSize>().Int32Value());
3551   Bnezc(scratch.AsGpuRegister(), exception_blocks_.back().Entry());
3552 }
3553 
EmitExceptionPoll(Mips64ExceptionSlowPath * exception)3554 void Mips64Assembler::EmitExceptionPoll(Mips64ExceptionSlowPath* exception) {
3555   Bind(exception->Entry());
3556   if (exception->stack_adjust_ != 0) {  // Fix up the frame.
3557     DecreaseFrameSize(exception->stack_adjust_);
3558   }
3559   // Pass exception object as argument.
3560   // Don't care about preserving A0 as this call won't return.
3561   CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
3562   Move(A0, exception->scratch_.AsGpuRegister());
3563   // Set up call to Thread::Current()->pDeliverException
3564   LoadFromOffset(kLoadDoubleword,
3565                  T9,
3566                  S1,
3567                  QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pDeliverException).Int32Value());
3568   Jr(T9);
3569   Nop();
3570 
3571   // Call never returns
3572   Break();
3573 }
3574 
3575 }  // namespace mips64
3576 }  // namespace art
3577