/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86_64.h"

#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"

namespace art {
namespace x86_64 {

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86_64Core(static_cast<int>(reg));
}
static dwarf::Reg DWARFReg(FloatRegister reg) {
  return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 8;

#define __ asm_.

void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
                                         ManagedRegister method_reg,
                                         ArrayRef<const ManagedRegister> spill_regs,
                                         const ManagedRegisterEntrySpills& entry_spills) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(8);  // Return address on stack.
  CHECK_ALIGNED(frame_size, kStackAlignment);
  int gpr_count = 0;
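  // Spill callee-save core registers (highest index first) just below the return address.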
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ pushq(spill.AsCpuRegister());
      gpr_count++;
      cfi().AdjustCFAOffset(kFramePointerSize);
      cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
    }
  }
  // return address then method on stack.
  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
                          - (gpr_count * kFramePointerSize)
                          - kFramePointerSize /*return address*/;
  __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
  cfi().AdjustCFAOffset(rest_of_frame);

  // spill xmms
  int64_t offset = rest_of_frame;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      offset -= sizeof(double);
      __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
      cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
    }
  }

  static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
                "Unexpected frame pointer size.");

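  // Store the method pointer at the bottom of the frame.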
  __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());

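  // Spill incoming register arguments to their argument slots above this frame.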
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    ManagedRegisterSpill spill = entry_spills.at(i);
    if (spill.AsX86_64().IsCpuRegister()) {
      if (spill.getSize() == 8) {
        __ movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                spill.AsX86_64().AsCpuRegister());
      } else {
        CHECK_EQ(spill.getSize(), 4);
        __ movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                spill.AsX86_64().AsCpuRegister());
      }
    } else {
      if (spill.getSize() == 8) {
        __ movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                 spill.AsX86_64().AsXmmRegister());
      } else {
        CHECK_EQ(spill.getSize(), 4);
        __ movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                 spill.AsX86_64().AsXmmRegister());
      }
    }
  }
}

void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                          ArrayRef<const ManagedRegister> spill_regs,
                                          bool may_suspend ATTRIBUTE_UNUSED) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  cfi().RememberState();
  int gpr_count = 0;
  // unspill xmms
  int64_t offset = static_cast<int64_t>(frame_size)
      - (spill_regs.size() * kFramePointerSize)
      - 2 * kFramePointerSize;
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      offset += sizeof(double);
      __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
      cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
    } else {
      gpr_count++;
    }
  }
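  // Release the part of the frame below the saved core registers, then pop them.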
  int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
  __ addq(CpuRegister(RSP), Immediate(adjust));
  cfi().AdjustCFAOffset(-adjust);
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ popq(spill.AsCpuRegister());
      cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
      cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
    }
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
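  // Growing the frame with a negative addq immediate is equivalent to subq(RSP, adjust).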
  __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
  cfi().AdjustCFAOffset(adjust);
}

static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  assembler->addq(CpuRegister(RSP), Immediate(adjust));
  assembler->cfi().AdjustCFAOffset(-adjust);
}

void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(adjust, &asm_);
}

void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
    } else {
      CHECK_EQ(8u, size);
      __ movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
    }
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(0u, size);
    __ movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
    __ movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
            src.AsRegisterPairHigh());
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(CpuRegister(RSP), offs));
    } else {
      __ fstpl(Address(CpuRegister(RSP), offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
    }
  }
}

void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
                                                    uint32_t imm,
                                                    ManagedRegister) {
  __ movl(Address(CpuRegister(RSP), dest), Immediate(imm));  // TODO(64) movq?
}

void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                                       FrameOffset fr_offs,
                                                       ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
  __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
}

void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
                                            ManagedRegister /*src*/,
                                            FrameOffset /*in_off*/,
                                            ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
}

void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
    } else {
      CHECK_EQ(8u, size);
      __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(0u, size);
    __ movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
    __ movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(CpuRegister(RSP), src));
    } else {
      __ fldl(Address(CpuRegister(RSP), src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
    }
  }
}

void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
                                             ThreadOffset64 src, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 1u) {
      __ gs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src, true));
    } else {
      CHECK_EQ(4u, size);
      __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ gs()->flds(Address::Absolute(src, true));
    } else {
      __ gs()->fldl(Address::Absolute(src, true));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
    } else {
      __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
    }
  }
}

void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
}

void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest,
                                      ManagedRegister mbase,
                                      MemberOffset offs,
                                      bool unpoison_reference) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(base.IsCpuRegister());
  CHECK(dest.IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
                                         ManagedRegister mbase,
                                         Offset offs) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(base.IsCpuRegister());
  CHECK(dest.IsCpuRegister());
  __ movq(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
}

void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
}

void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register
      __ subl(CpuRegister(RSP), Immediate(16));
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(CpuRegister(RSP), 0));
        __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(CpuRegister(RSP), 0));
        __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      }
      __ addq(CpuRegister(RSP), Immediate(16));
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
  __ movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                                   ThreadOffset64 thr_offs,
                                                   ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
  Store(fr_offs, scratch, 8);
}

void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  Load(scratch, fr_offs, 8);
  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   FrameOffset src,
                                   ManagedRegister mscratch,
                                   size_t size) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
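  // An 8-byte copy through a CPU scratch register is split into two 4-byte moves.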
  if (scratch.IsCpuRegister() && size == 8) {
    Load(scratch, src, 4);
    Store(dest, scratch, 4);
    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
  } else {
    Load(scratch, src, size);
    Store(dest, scratch, size);
  }
}

void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                   ManagedRegister /*src_base*/,
                                   Offset /*src_offset*/,
                                   ManagedRegister /*scratch*/,
                                   size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base,
                                   Offset dest_offset,
                                   FrameOffset src,
                                   ManagedRegister scratch,
                                   size_t size) {
  CHECK(scratch.IsNoRegister());
  CHECK_EQ(size, 4u);
  __ pushq(Address(CpuRegister(RSP), src));
  __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   FrameOffset src_base,
                                   Offset src_offset,
                                   ManagedRegister mscratch,
                                   size_t size) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  CHECK_EQ(size, 4u);
  __ movq(scratch, Address(CpuRegister(RSP), src_base));
  __ movq(scratch, Address(scratch, src_offset));
  __ movq(Address(CpuRegister(RSP), dest), scratch);
}

void X86_64JNIMacroAssembler::Copy(ManagedRegister dest,
                                   Offset dest_offset,
                                   ManagedRegister src,
                                   Offset src_offset,
                                   ManagedRegister scratch,
                                   size_t size) {
  CHECK_EQ(size, 4u);
  CHECK(scratch.IsNoRegister());
  __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
  __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   Offset dest_offset,
                                   FrameOffset src,
                                   Offset src_offset,
                                   ManagedRegister mscratch,
                                   size_t size) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  CHECK_EQ(size, 4u);
  CHECK_EQ(dest.Int32Value(), src.Int32Value());
  __ movq(scratch, Address(CpuRegister(RSP), src));
  __ pushq(Address(scratch, src_offset));
  __ popq(Address(scratch, dest_offset));
}

void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
  __ mfence();
}

void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                                     FrameOffset handle_scope_offset,
                                                     ManagedRegister min_reg,
                                                     bool null_allowed) {
  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
  if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
    // Use out_reg as indicator of null.
    in_reg = out_reg;
    // TODO: movzwl
    __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
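  // With null_allowed, a null reference leaves out_reg null; otherwise out_reg is
  // pointed at the handle scope entry.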
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
}

void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                     FrameOffset handle_scope_offset,
                                                     ManagedRegister mscratch,
                                                     bool null_allowed) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
  Store(out_off, scratch, 8);
}

// Given a handle scope entry, load the associated reference.
void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                           ManagedRegister min_reg) {
  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
  CHECK(out_reg.IsCpuRegister());
  CHECK(in_reg.IsCpuRegister());
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
  }
  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
  __ j(kZero, &null_arg);
  __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
  __ Bind(&null_arg);
}

void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call
}

void X86_64JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  __ movq(scratch, Address(CpuRegister(RSP), base));
  __ call(Address(scratch, offset));
}

void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
  __ gs()->call(Address::Absolute(offset, true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
  __ gs()->movq(tr.AsX86_64().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  __ gs()->movq(scratch.AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
  __ movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
}

// Slowpath entered when Thread::Current()->_exception is non-null
class X86_64ExceptionSlowPath FINAL : public SlowPath {
 public:
  explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
  virtual void Emit(Assembler *sp_asm) OVERRIDE;
 private:
  const size_t stack_adjust_;
};

void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
  X86_64ExceptionSlowPath* slow = new (__ GetAllocator()) X86_64ExceptionSlowPath(stack_adjust);
  __ GetBuffer()->EnqueueSlowPath(slow);
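  // Branch to the slow path if the current thread has a pending exception.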
  __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
                Immediate(0));
  __ j(kNotEqual, slow->Entry());
}

std::unique_ptr<JNIMacroLabel> X86_64JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new X86_64JNIMacroLabel());
}

void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label,
                                   JNIMacroUnaryCondition condition,
                                   ManagedRegister test) {
  CHECK(label != nullptr);

  art::x86_64::Condition x86_64_cond;
  switch (condition) {
    case JNIMacroUnaryCondition::kZero:
      x86_64_cond = art::x86_64::kZero;
      break;
    case JNIMacroUnaryCondition::kNotZero:
      x86_64_cond = art::x86_64::kNotZero;
      break;
    default:
      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
      UNREACHABLE();
  }

  // TEST reg, reg
  // Jcc <Offset>
  __ testq(test.AsX86_64().AsCpuRegister(), test.AsX86_64().AsCpuRegister());
  __ j(x86_64_cond, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

#undef __

void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
  X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  // Note: the return value is dead
  if (stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSizeImpl(stack_adjust_, sp_asm);
  }
  // Pass exception as argument in RDI
  __ gs()->movq(CpuRegister(RDI),
                Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
  __ gs()->call(
      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
  // this call should never return
  __ int3();
#undef __
}

}  // namespace x86_64
}  // namespace art