/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86_64.h"

#include "base/casts.h"
#include "base/memory_region.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "thread.h"

namespace art {
namespace x86_64 {

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86_64Core(static_cast<int>(reg));
}
static dwarf::Reg DWARFReg(FloatRegister reg) {
  return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 8;

#define __ asm_.

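// Builds the method frame on entry: pushes the core registers listed in spill_regs, allocates
// the remainder of the frame, spills any XMM registers in spill_regs, stores the method pointer
// (method_reg) at [RSP], and copies the incoming argument registers to their assigned frame
// offsets. CFI is updated alongside each stack adjustment and register save.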
void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
                                         ManagedRegister method_reg,
                                         ArrayRef<const ManagedRegister> spill_regs,
                                         const ManagedRegisterEntrySpills& entry_spills) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(8);  // Return address on stack.
  CHECK_ALIGNED(frame_size, kStackAlignment);
  int gpr_count = 0;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ pushq(spill.AsCpuRegister());
      gpr_count++;
      cfi().AdjustCFAOffset(kFramePointerSize);
      cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
    }
  }
  // return address then method on stack.
  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
                          - (gpr_count * kFramePointerSize)
                          - kFramePointerSize /*return address*/;
  __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
  cfi().AdjustCFAOffset(rest_of_frame);

  // spill xmms
  int64_t offset = rest_of_frame;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      offset -= sizeof(double);
      __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
      cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
    }
  }

  static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
                "Unexpected frame pointer size.");

  __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());

  for (const ManagedRegisterSpill& spill : entry_spills) {
    if (spill.AsX86_64().IsCpuRegister()) {
      if (spill.getSize() == 8) {
        __ movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                spill.AsX86_64().AsCpuRegister());
      } else {
        CHECK_EQ(spill.getSize(), 4);
        __ movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                spill.AsX86_64().AsCpuRegister());
      }
    } else {
      if (spill.getSize() == 8) {
        __ movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                 spill.AsX86_64().AsXmmRegister());
      } else {
        CHECK_EQ(spill.getSize(), 4);
        __ movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                 spill.AsX86_64().AsXmmRegister());
      }
    }
  }
}

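// Tears the frame down in the reverse order of BuildFrame: reloads spilled XMM registers,
// releases the non-GPR part of the frame, pops the core spill registers and returns. The
// CFI state is remembered and restored so unwind info stays correct for any code emitted
// after the exit block.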
void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                          ArrayRef<const ManagedRegister> spill_regs,
                                          bool may_suspend ATTRIBUTE_UNUSED) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  cfi().RememberState();
  int gpr_count = 0;
  // unspill xmms
  int64_t offset = static_cast<int64_t>(frame_size)
                   - (spill_regs.size() * kFramePointerSize)
                   - 2 * kFramePointerSize;
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      offset += sizeof(double);
      __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
      cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
    } else {
      gpr_count++;
    }
  }
  int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
  __ addq(CpuRegister(RSP), Immediate(adjust));
  cfi().AdjustCFAOffset(-adjust);
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ popq(spill.AsCpuRegister());
      cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
      cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
    }
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
  cfi().AdjustCFAOffset(adjust);
}

static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  assembler->addq(CpuRegister(RSP), Immediate(adjust));
  assembler->cfi().AdjustCFAOffset(-adjust);
}

void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(adjust, &asm_);
}

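// Stores a managed register to a frame slot, selecting the instruction by register kind
// (GPR, register pair, X87 or XMM) and by the 4- or 8-byte size of the value.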
void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
    } else {
      CHECK_EQ(8u, size);
      __ movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
    }
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(0u, size);
    __ movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
    __ movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
            src.AsRegisterPairHigh());
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(CpuRegister(RSP), offs));
    } else {
      __ fstpl(Address(CpuRegister(RSP), offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
    }
  }
}

void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
                                                    uint32_t imm,
                                                    ManagedRegister) {
  __ movl(Address(CpuRegister(RSP), dest), Immediate(imm));  // TODO(64) movq?
}

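// Thread-local accesses below go through the GS segment: gs()->... with
// Address::Absolute(offset, /*no_rip=*/ true) addresses the given offset relative to the
// current Thread, which ART keeps reachable through GS on x86-64.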
void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                                       FrameOffset fr_offs,
                                                       ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
  __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
}

void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
                                            ManagedRegister /*src*/,
                                            FrameOffset /*in_off*/,
                                            ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
}

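// Loads a frame slot into a managed register; the counterpart of Store() above, with the
// same dispatch on register kind and value size.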
void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
    } else {
      CHECK_EQ(8u, size);
      __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(0u, size);
    __ movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
    __ movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(CpuRegister(RSP), src));
    } else {
      __ fldl(Address(CpuRegister(RSP), src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
    }
  }
}

void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
                                             ThreadOffset64 src, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 1u) {
      __ gs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src, true));
    } else {
      CHECK_EQ(4u, size);
      __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ gs()->flds(Address::Absolute(src, true));
    } else {
      __ gs()->fldl(Address::Absolute(src, true));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
    } else {
      __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
    }
  }
}

void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
}

void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest,
                                      ManagedRegister mbase,
                                      MemberOffset offs,
                                      bool unpoison_reference) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(base.IsCpuRegister());
  CHECK(dest.IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
                                         ManagedRegister mbase,
                                         Offset offs) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(base.IsCpuRegister());
  CHECK(dest.IsCpuRegister());
  __ movq(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
}

void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
}

void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

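// Register-to-register move. GPR-to-GPR uses movq; an X87 (ST0) to XMM move is routed
// through a temporary stack slot, since there is no direct x87-to-SSE register transfer.
// Other register-class combinations are currently unimplemented.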
void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register
      __ subl(CpuRegister(RSP), Immediate(16));
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(CpuRegister(RSP), 0));
        __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(CpuRegister(RSP), 0));
        __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      }
      __ addq(CpuRegister(RSP), Immediate(16));
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
  __ movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                                   ThreadOffset64 thr_offs,
                                                   ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
  Store(fr_offs, scratch, 8);
}

void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  Load(scratch, fr_offs, 8);
  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}

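// Copies between two frame slots through the scratch register; an 8-byte copy is performed
// as two 4-byte halves.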
void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   FrameOffset src,
                                   ManagedRegister mscratch,
                                   size_t size) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  if (scratch.IsCpuRegister() && size == 8) {
    Load(scratch, src, 4);
    Store(dest, scratch, 4);
    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
  } else {
    Load(scratch, src, size);
    Store(dest, scratch, size);
  }
}

void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                   ManagedRegister /*src_base*/,
                                   Offset /*src_offset*/,
                                   ManagedRegister /*scratch*/,
                                   size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

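// Several of the copies below are specified as 4-byte copies but are implemented with a
// pushq/popq pair through the stack, which avoids the need for a scratch register (the
// scratch argument is checked to be a no-register).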
void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base,
                                   Offset dest_offset,
                                   FrameOffset src,
                                   ManagedRegister scratch,
                                   size_t size) {
  CHECK(scratch.IsNoRegister());
  CHECK_EQ(size, 4u);
  __ pushq(Address(CpuRegister(RSP), src));
  __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   FrameOffset src_base,
                                   Offset src_offset,
                                   ManagedRegister mscratch,
                                   size_t size) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  CHECK_EQ(size, 4u);
  __ movq(scratch, Address(CpuRegister(RSP), src_base));
  __ movq(scratch, Address(scratch, src_offset));
  __ movq(Address(CpuRegister(RSP), dest), scratch);
}

void X86_64JNIMacroAssembler::Copy(ManagedRegister dest,
                                   Offset dest_offset,
                                   ManagedRegister src,
                                   Offset src_offset,
                                   ManagedRegister scratch,
                                   size_t size) {
  CHECK_EQ(size, 4u);
  CHECK(scratch.IsNoRegister());
  __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
  __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   Offset dest_offset,
                                   FrameOffset src,
                                   Offset src_offset,
                                   ManagedRegister mscratch,
                                   size_t size) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  CHECK_EQ(size, 4u);
  CHECK_EQ(dest.Int32Value(), src.Int32Value());
  __ movq(scratch, Address(CpuRegister(RSP), src));
  __ pushq(Address(scratch, src_offset));
  __ popq(Address(scratch, dest_offset));
}

void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
  __ mfence();
}

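// Creates a handle scope entry for a reference argument: when null is allowed and the
// reference is null, the result is zero (a null jobject); otherwise the result is the
// address of the handle scope slot that holds the reference.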
void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                                     FrameOffset handle_scope_offset,
                                                     ManagedRegister min_reg,
                                                     bool null_allowed) {
  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
  if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
    // Use out_reg as indicator of null.
    in_reg = out_reg;
    // TODO: movzwl
    __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
}

void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                     FrameOffset handle_scope_offset,
                                                     ManagedRegister mscratch,
                                                     bool null_allowed) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
  Store(out_off, scratch, 8);
}

// Given a handle scope entry, load the associated reference.
void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                           ManagedRegister min_reg) {
  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
  CHECK(out_reg.IsCpuRegister());
  CHECK(in_reg.IsCpuRegister());
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
  }
  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
  __ j(kZero, &null_arg);
  __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
  __ Bind(&null_arg);
}

void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call
}

void X86_64JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  __ movq(scratch, Address(CpuRegister(RSP), base));
  __ call(Address(scratch, offset));
}

void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
  __ gs()->call(Address::Absolute(offset, true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
  __ gs()->movq(tr.AsX86_64().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  __ gs()->movq(scratch.AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
  __ movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
}

// Slowpath entered when Thread::Current()->_exception is non-null
class X86_64ExceptionSlowPath final : public SlowPath {
 public:
  explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
  void Emit(Assembler *sp_asm) override;
 private:
  const size_t stack_adjust_;
};

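// Compares the thread's pending-exception field (Thread::ExceptionOffset, read via GS)
// against zero and, if it is non-null, branches to the slow path that delivers the
// exception (see X86_64ExceptionSlowPath::Emit below).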
void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
  X86_64ExceptionSlowPath* slow = new (__ GetAllocator()) X86_64ExceptionSlowPath(stack_adjust);
  __ GetBuffer()->EnqueueSlowPath(slow);
  __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
                Immediate(0));
  __ j(kNotEqual, slow->Entry());
}

std::unique_ptr<JNIMacroLabel> X86_64JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new X86_64JNIMacroLabel());
}

void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label,
                                   JNIMacroUnaryCondition condition,
                                   ManagedRegister test) {
  CHECK(label != nullptr);

  art::x86_64::Condition x86_64_cond;
  switch (condition) {
    case JNIMacroUnaryCondition::kZero:
      x86_64_cond = art::x86_64::kZero;
      break;
    case JNIMacroUnaryCondition::kNotZero:
      x86_64_cond = art::x86_64::kNotZero;
      break;
    default:
      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
      UNREACHABLE();
  }

  // TEST reg, reg
  // Jcc <Offset>
  __ testq(test.AsX86_64().AsCpuRegister(), test.AsX86_64().AsCpuRegister());
  __ j(x86_64_cond, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

#undef __

void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
  X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  // Note: the return value is dead
  if (stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSizeImpl(stack_adjust_, sp_asm);
  }
  // Pass exception as argument in RDI
  __ gs()->movq(CpuRegister(RDI),
                Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
  __ gs()->call(
      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
  // this call should never return
  __ int3();
#undef __
}

}  // namespace x86_64
}  // namespace art