/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86.h"

#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "thread.h"
#include "utils/assembler.h"
namespace art {
namespace x86 {

// Slow path entered when Thread::Current()->_exception is non-null.
class X86ExceptionSlowPath final : public SlowPath {
 public:
  explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
  void Emit(Assembler *sp_asm) override;
 private:
  const size_t stack_adjust_;
};

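// Maps an x86 core register to its DWARF register number for CFI directives.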
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86Core(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 4;

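// Shorthand so the emission code below reads like assembly: `__ movl(...)`
// expands to `asm_.movl(...)` on the wrapped X86Assembler.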
#define __ asm_.

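// Builds the JNI stub frame: the callee-save GPRs are pushed (the return
// address is already on the stack), the stack is extended to the requested
// frame size, and the ArtMethod* lands at the new stack pointer. Incoming
// arguments held in registers are then spilled just above the frame.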
void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
                                      ManagedRegister method_reg,
                                      ArrayRef<const ManagedRegister> spill_regs,
                                      const ManagedRegisterEntrySpills& entry_spills) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(4);  // Return address on stack.
  CHECK_ALIGNED(frame_size, kStackAlignment);
  int gpr_count = 0;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    Register spill = spill_regs[i].AsX86().AsCpuRegister();
    __ pushl(spill);
    gpr_count++;
    cfi().AdjustCFAOffset(kFramePointerSize);
    cfi().RelOffset(DWARFReg(spill), 0);
  }

  // The return address (already pushed) and the ArtMethod* slot account for
  // the rest of the frame.
  int32_t adjust = frame_size - gpr_count * kFramePointerSize -
      kFramePointerSize /*method*/ -
      kFramePointerSize /*return address*/;
  __ addl(ESP, Immediate(-adjust));
  cfi().AdjustCFAOffset(adjust);
  __ pushl(method_reg.AsX86().AsCpuRegister());
  cfi().AdjustCFAOffset(kFramePointerSize);
  DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);

  for (const ManagedRegisterSpill& spill : entry_spills) {
    if (spill.AsX86().IsCpuRegister()) {
      int offset = frame_size + spill.getSpillOffset();
      __ movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
    } else {
      DCHECK(spill.AsX86().IsXmmRegister());
      if (spill.getSize() == 8) {
        __ movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
      } else {
        CHECK_EQ(spill.getSize(), 4);
        __ movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
      }
    }
  }
}

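// Tears down the frame: drops the outgoing area and the ArtMethod* slot, pops
// the callee-save GPRs, and returns. CFI state is remembered and restored
// around the epilogue so unwind info stays valid for code emitted afterwards.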
void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                       ArrayRef<const ManagedRegister> spill_regs,
                                       bool may_suspend ATTRIBUTE_UNUSED) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  cfi().RememberState();
  // -kFramePointerSize for ArtMethod*.
  int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
  __ addl(ESP, Immediate(adjust));
  cfi().AdjustCFAOffset(-adjust);
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    Register spill = spill_regs[i].AsX86().AsCpuRegister();
    __ popl(spill);
    cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
    cfi().Restore(DWARFReg(spill));
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

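// Frame growth is emitted as addl with a negated immediate rather than subl;
// the two are equivalent, and the CFA offset is adjusted to match.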
void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  __ addl(ESP, Immediate(-adjust));
  cfi().AdjustCFAOffset(adjust);
}

static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  assembler->addl(ESP, Immediate(adjust));
  assembler->cfi().AdjustCFAOffset(-adjust);
}

void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(&asm_, adjust);
}

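// Stores a managed register into the frame. Register pairs are written as two
// 32-bit halves; x87 values are stored with fstps/fstpl, which pop ST0.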
void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  X86ManagedRegister src = msrc.AsX86();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    __ movl(Address(ESP, offs), src.AsCpuRegister());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ movl(Address(ESP, offs), src.AsRegisterPairLow());
    __ movl(Address(ESP, FrameOffset(offs.Int32Value() + 4)), src.AsRegisterPairHigh());
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(ESP, offs));
    } else {
      __ fstpl(Address(ESP, offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(ESP, offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(ESP, offs), src.AsXmmRegister());
    }
  }
}

void X86JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  X86ManagedRegister src = msrc.AsX86();
  CHECK(src.IsCpuRegister());
  __ movl(Address(ESP, dest), src.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86ManagedRegister src = msrc.AsX86();
  CHECK(src.IsCpuRegister());
  __ movl(Address(ESP, dest), src.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister) {
  __ movl(Address(ESP, dest), Immediate(imm));
}

void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                                    FrameOffset fr_offs,
                                                    ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  __ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
  __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
  __ fs()->movl(Address::Absolute(thr_offs), ESP);
}

void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
                                         ManagedRegister /*src*/,
                                         FrameOffset /*in_off*/,
                                         ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);  // This case currently only exists for ARM.
}

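// Loads from the frame into a managed register; the inverse of Store() above,
// with flds/fldl pushing onto the x87 stack.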
void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    __ movl(dest.AsCpuRegister(), Address(ESP, src));
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ movl(dest.AsRegisterPairLow(), Address(ESP, src));
    __ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value() + 4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(ESP, src));
    } else {
      __ fldl(Address(ESP, src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(ESP, src));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(ESP, src));
    }
  }
}

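// Thread-local data is addressed through the %fs segment override, which on
// x86 points at the current Thread object.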
void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 1u) {
      __ fs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src));
    } else {
      CHECK_EQ(4u, size);
      __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
    __ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value() + 4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ fs()->flds(Address::Absolute(src));
    } else {
      __ fs()->fldl(Address::Absolute(src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
    } else {
      __ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
    }
  }
}

void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(ESP, src));
}

void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
                                   bool unpoison_reference) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
  }
}

void X86JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
                                      ManagedRegister base,
                                      Offset offs) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
}

void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
}

void X86JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

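// There is no direct x87-to-XMM move instruction, so that case round-trips the
// value through a temporary 16-byte stack slot.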
void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  X86ManagedRegister src = msrc.AsX86();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movl(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop the X87 register.
      __ subl(ESP, Immediate(16));
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(ESP, 0));
        __ movss(dest.AsXmmRegister(), Address(ESP, 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(ESP, 0));
        __ movsd(dest.AsXmmRegister(), Address(ESP, 0));
      }
      __ addl(ESP, Immediate(16));
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  __ movl(scratch.AsCpuRegister(), Address(ESP, src));
  __ movl(Address(ESP, dest), scratch.AsCpuRegister());
}

void X86JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                                ThreadOffset32 thr_offs,
                                                ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
  Store(fr_offs, scratch, 4);
}

void X86JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
                                              FrameOffset fr_offs,
                                              ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  Load(scratch, fr_offs, 4);
  __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}

void X86JNIMacroAssembler::Copy(FrameOffset dest,
                                FrameOffset src,
                                ManagedRegister mscratch,
                                size_t size) {
  X86ManagedRegister scratch = mscratch.AsX86();
  if (scratch.IsCpuRegister() && size == 8) {
    Load(scratch, src, 4);
    Store(dest, scratch, 4);
    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
  } else {
    Load(scratch, src, size);
    Store(dest, scratch, size);
  }
}

void X86JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                ManagedRegister /*src_base*/,
                                Offset /*src_offset*/,
                                ManagedRegister /*scratch*/,
                                size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

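// Several of the 32-bit copies below go through pushl/popl, which moves memory
// to memory directly and avoids the need for a scratch register.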
void X86JNIMacroAssembler::Copy(ManagedRegister dest_base,
                                Offset dest_offset,
                                FrameOffset src,
                                ManagedRegister scratch,
                                size_t size) {
  CHECK(scratch.IsNoRegister());
  CHECK_EQ(size, 4u);
  __ pushl(Address(ESP, src));
  __ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
}

void X86JNIMacroAssembler::Copy(FrameOffset dest,
                                FrameOffset src_base,
                                Offset src_offset,
                                ManagedRegister mscratch,
                                size_t size) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  CHECK_EQ(size, 4u);
  __ movl(scratch, Address(ESP, src_base));
  __ movl(scratch, Address(scratch, src_offset));
  __ movl(Address(ESP, dest), scratch);
}

void X86JNIMacroAssembler::Copy(ManagedRegister dest,
                                Offset dest_offset,
                                ManagedRegister src,
                                Offset src_offset,
                                ManagedRegister scratch,
                                size_t size) {
  CHECK_EQ(size, 4u);
  CHECK(scratch.IsNoRegister());
  __ pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
  __ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
}

void X86JNIMacroAssembler::Copy(FrameOffset dest,
                                Offset dest_offset,
                                FrameOffset src,
                                Offset src_offset,
                                ManagedRegister mscratch,
                                size_t size) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  CHECK_EQ(size, 4u);
  CHECK_EQ(dest.Int32Value(), src.Int32Value());
  __ movl(scratch, Address(ESP, src));
  __ pushl(Address(scratch, src_offset));
  __ popl(Address(scratch, dest_offset));
}

void X86JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
  __ mfence();
}

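// Creates a handle scope entry for a reference. With null_allowed, a null
// in_reg yields a null out_reg; otherwise out_reg receives the address of the
// handle scope slot holding the reference.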
void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                                  FrameOffset handle_scope_offset,
                                                  ManagedRegister min_reg,
                                                  bool null_allowed) {
  X86ManagedRegister out_reg = mout_reg.AsX86();
  X86ManagedRegister in_reg = min_reg.AsX86();
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
  }
}

void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                  FrameOffset handle_scope_offset,
                                                  ManagedRegister mscratch,
                                                  bool null_allowed) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
    __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
  }
  Store(out_off, scratch, 4);
}

// Given a handle scope entry, load the associated reference.
void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                        ManagedRegister min_reg) {
  X86ManagedRegister out_reg = mout_reg.AsX86();
  X86ManagedRegister in_reg = min_reg.AsX86();
  CHECK(out_reg.IsCpuRegister());
  CHECK(in_reg.IsCpuRegister());
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
  }
  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
  __ j(kZero, &null_arg);
  __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
  __ Bind(&null_arg);
}

void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
  X86ManagedRegister base = mbase.AsX86();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call
}

void X86JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  __ movl(scratch, Address(ESP, base));
  __ call(Address(scratch, offset));
}

void X86JNIMacroAssembler::CallFromThread(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
  __ fs()->call(Address::Absolute(offset));
}

void X86JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
  __ fs()->movl(tr.AsX86().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
}

void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset,
                                            ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
  __ movl(Address(ESP, offset), scratch.AsCpuRegister());
}

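// Polls for a pending exception: compares the thread-local exception slot
// against null and, if one is set, branches to a slow path that delivers it.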
void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
  X86ExceptionSlowPath* slow = new (__ GetAllocator()) X86ExceptionSlowPath(stack_adjust);
  __ GetBuffer()->EnqueueSlowPath(slow);
  __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
  __ j(kNotEqual, slow->Entry());
}

std::unique_ptr<JNIMacroLabel> X86JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new X86JNIMacroLabel());
}

void X86JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86JNIMacroLabel::Cast(label)->AsX86());
}

void X86JNIMacroAssembler::Jump(JNIMacroLabel* label,
                                JNIMacroUnaryCondition condition,
                                ManagedRegister test) {
  CHECK(label != nullptr);

  art::x86::Condition x86_cond;
  switch (condition) {
    case JNIMacroUnaryCondition::kZero:
      x86_cond = art::x86::kZero;
      break;
    case JNIMacroUnaryCondition::kNotZero:
      x86_cond = art::x86::kNotZero;
      break;
    default:
      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
      UNREACHABLE();
  }

  // TEST reg, reg
  // Jcc <Offset>
  __ testl(test.AsX86().AsCpuRegister(), test.AsX86().AsCpuRegister());
  __ j(x86_cond, X86JNIMacroLabel::Cast(label)->AsX86());

  // x86 also has JCXZ/JECXZ, but they are not worth implementing because we
  // are unlikely to codegen an ECX + kZero check.
}

void X86JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86JNIMacroLabel::Cast(label)->AsX86());
}

#undef __

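// Slow path body: undo any extra stack adjustment, load the pending exception
// into EAX (the argument register for the entrypoint), and call the
// pDeliverException runtime entrypoint, which does not return.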
void X86ExceptionSlowPath::Emit(Assembler *sasm) {
  X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  // Note: the return value is dead
  if (stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
  }
  // Pass exception as argument in EAX
  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
  // this call should never return
  __ int3();
#undef __
}

}  // namespace x86
}  // namespace art