/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86_64.h"

#include "base/casts.h"
#include "base/memory_region.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "indirect_reference_table.h"
#include "lock_word.h"
#include "thread.h"

namespace art HIDDEN {
namespace x86_64 {

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86_64Core(static_cast<int>(reg));
}
static dwarf::Reg DWARFReg(FloatRegister reg) {
  return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 8;

static constexpr size_t kNativeStackAlignment = 16;
static_assert(kNativeStackAlignment == kStackAlignment);
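// R11 is caller-save and is not used for passing arguments or return values,
// so it can be freely clobbered as a scratch register.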
static inline CpuRegister GetScratchRegister() {
  return CpuRegister(R11);
}

#define __ asm_.

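// Builds the managed frame on entry to the JNI stub: pushes callee-save core registers,
// reserves the rest of the frame (spilling callee-save XMM registers into it) and stores
// the ArtMethod* at the bottom of the frame unless there is no method register
// (@CriticalNative).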
void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
                                         ManagedRegister method_reg,
                                         ArrayRef<const ManagedRegister> spill_regs) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(8);  // Return address on stack.
  // Note: @CriticalNative tail call is not used (would have frame_size == kFramePointerSize).
  if (method_reg.IsNoRegister()) {
    CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  } else {
    CHECK_ALIGNED(frame_size, kStackAlignment);
  }
  size_t gpr_count = 0u;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ pushq(spill.AsCpuRegister());
      gpr_count++;
      cfi().AdjustCFAOffset(kFramePointerSize);
      cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
    }
  }
  // return address then method on stack.
  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
                          - (gpr_count * kFramePointerSize)
                          - kFramePointerSize /*return address*/;
  if (rest_of_frame != 0) {
    __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
    cfi().AdjustCFAOffset(rest_of_frame);
  }

  // spill xmms
  int64_t offset = rest_of_frame;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      offset -= sizeof(double);
      __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
      cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
    }
  }

  static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
                "Unexpected frame pointer size.");

  if (method_reg.IsRegister()) {
    __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
  }
}

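// Tears down the frame built by BuildFrame: reloads callee-save XMM registers, releases the
// remaining stack space, pops callee-save core registers and returns, keeping the CFI
// consistent for any code emitted after the exit block.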
void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                          ArrayRef<const ManagedRegister> spill_regs,
                                          [[maybe_unused]] bool may_suspend) {
  CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  cfi().RememberState();
  int gpr_count = 0;
  // unspill xmms
  int64_t offset = static_cast<int64_t>(frame_size)
      - (spill_regs.size() * kFramePointerSize)
      - kFramePointerSize;
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
      cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
      offset += sizeof(double);
    } else {
      gpr_count++;
    }
  }
  DCHECK_EQ(static_cast<size_t>(offset),
            frame_size - (gpr_count * kFramePointerSize) - kFramePointerSize);
  if (offset != 0) {
    __ addq(CpuRegister(RSP), Immediate(offset));
    cfi().AdjustCFAOffset(-offset);
  }
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ popq(spill.AsCpuRegister());
      cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
      cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
    }
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
    cfi().AdjustCFAOffset(adjust);
  }
}

static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    assembler->addq(CpuRegister(RSP), Immediate(adjust));
    assembler->cfi().AdjustCFAOffset(-adjust);
  }
}

void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(adjust, &asm_);
}

ManagedRegister X86_64JNIMacroAssembler::CoreRegisterWithSize(ManagedRegister src, size_t size) {
  DCHECK(src.AsX86_64().IsCpuRegister());
  DCHECK(size == 4u || size == 8u) << size;
  return src;
}

void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  Store(X86_64ManagedRegister::FromCpuRegister(RSP), MemberOffset(offs.Int32Value()), msrc, size);
}

void X86_64JNIMacroAssembler::Store(ManagedRegister mbase,
                                    MemberOffset offs,
                                    ManagedRegister msrc,
                                    size_t size) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(Address(base.AsCpuRegister(), offs), src.AsCpuRegister());
    } else {
      CHECK_EQ(8u, size);
      __ movq(Address(base.AsCpuRegister(), offs), src.AsCpuRegister());
    }
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(base.AsCpuRegister(), offs));
    } else {
      __ fstpl(Address(base.AsCpuRegister(), offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(base.AsCpuRegister(), offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(base.AsCpuRegister(), offs), src.AsXmmRegister());
    }
  }
}

void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

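// Stores the current stack pointer at the given Thread offset (accessed via the GS segment
// base); when `tag_sp` is set, bit 1 of the stored value is set so that the runtime can
// distinguish a tagged SP from a plain one.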
void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) {
  if (tag_sp) {
    CpuRegister reg = GetScratchRegister();
    __ movq(reg, CpuRegister(RSP));
    __ orq(reg, Immediate(0x2));
    __ gs()->movq(Address::Absolute(thr_offs, true), reg);
  } else {
    __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
  }
}

void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  Load(mdest, X86_64ManagedRegister::FromCpuRegister(RSP), MemberOffset(src.Int32Value()), size);
}

void X86_64JNIMacroAssembler::Load(ManagedRegister mdest,
                                   ManagedRegister mbase,
                                   MemberOffset offs,
                                   size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  X86_64ManagedRegister base = mbase.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
    } else {
      CHECK_EQ(8u, size);
      __ movq(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
    }
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(base.AsCpuRegister(), offs));
    } else {
      __ fldl(Address(base.AsCpuRegister(), offs));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(base.AsCpuRegister(), offs));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(base.AsCpuRegister(), offs));
    }
  }
}

void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
}

void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

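// Shuffles arguments from managed ABI locations to native ABI locations in two passes:
// the first pass stores or copies arguments that end up on the stack (converting spilled
// references to `jobject`), the second pass fills destination registers, deferring any move
// whose destination register is still needed as a source. Since there are no cycles, every
// iteration of the second pass fills at least one register.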
void X86_64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
                                            ArrayRef<ArgumentLocation> srcs,
                                            ArrayRef<FrameOffset> refs) {
  size_t arg_count = dests.size();
  DCHECK_EQ(arg_count, srcs.size());
  DCHECK_EQ(arg_count, refs.size());

  auto get_mask = [](ManagedRegister reg) -> uint32_t {
    X86_64ManagedRegister x86_64_reg = reg.AsX86_64();
    if (x86_64_reg.IsCpuRegister()) {
      size_t cpu_reg_number = static_cast<size_t>(x86_64_reg.AsCpuRegister().AsRegister());
      DCHECK_LT(cpu_reg_number, 16u);
      return 1u << cpu_reg_number;
    } else {
      DCHECK(x86_64_reg.IsXmmRegister());
      size_t xmm_reg_number = static_cast<size_t>(x86_64_reg.AsXmmRegister().AsFloatRegister());
      DCHECK_LT(xmm_reg_number, 16u);
      return (1u << 16u) << xmm_reg_number;
    }
  };

  // Collect registers to move while storing/copying args to stack slots.
  // Convert all register references and copied stack references to `jobject`.
  uint32_t src_regs = 0u;
  uint32_t dest_regs = 0u;
  for (size_t i = 0; i != arg_count; ++i) {
    const ArgumentLocation& src = srcs[i];
    const ArgumentLocation& dest = dests[i];
    const FrameOffset ref = refs[i];
    if (ref != kInvalidReferenceOffset) {
      DCHECK_EQ(src.GetSize(), kObjectReferenceSize);
      DCHECK_EQ(dest.GetSize(), static_cast<size_t>(kX86_64PointerSize));
    } else {
      DCHECK_EQ(src.GetSize(), dest.GetSize());
    }
    if (src.IsRegister() && ref != kInvalidReferenceOffset) {
      // Note: We can clobber `src` here as the register cannot hold more than one argument.
      // This overload of `CreateJObject()` is currently implemented as "test and branch";
      // if it was using a conditional move, it would be better to do this at move time.
      CreateJObject(src.GetRegister(), ref, src.GetRegister(), /*null_allowed=*/ i != 0u);
    }
    if (dest.IsRegister()) {
      // Note: X86_64ManagedRegister makes no distinction between 32-bit and 64-bit core
      // registers, so the following `Equals()` can return `true` for references; the
      // reference has already been converted to `jobject` above.
      if (src.IsRegister() && src.GetRegister().Equals(dest.GetRegister())) {
        // Nothing to do.
      } else {
        if (src.IsRegister()) {
          src_regs |= get_mask(src.GetRegister());
        }
        dest_regs |= get_mask(dest.GetRegister());
      }
    } else {
      if (src.IsRegister()) {
        Store(dest.GetFrameOffset(), src.GetRegister(), dest.GetSize());
      } else if (ref != kInvalidReferenceOffset) {
        CreateJObject(dest.GetFrameOffset(), ref, /*null_allowed=*/ i != 0u);
      } else {
        Copy(dest.GetFrameOffset(), src.GetFrameOffset(), dest.GetSize());
      }
    }
  }

  // Fill destination registers. Convert loaded references to `jobject`.
  // There should be no cycles, so this simple algorithm should make progress.
  while (dest_regs != 0u) {
    uint32_t old_dest_regs = dest_regs;
    for (size_t i = 0; i != arg_count; ++i) {
      const ArgumentLocation& src = srcs[i];
      const ArgumentLocation& dest = dests[i];
      const FrameOffset ref = refs[i];
      if (!dest.IsRegister()) {
        continue;  // Stored in first loop above.
      }
      uint32_t dest_reg_mask = get_mask(dest.GetRegister());
      if ((dest_reg_mask & dest_regs) == 0u) {
        continue;  // Equals source, or already filled in one of previous iterations.
      }
      if ((dest_reg_mask & src_regs) != 0u) {
        continue;  // Cannot clobber this register yet.
      }
      if (src.IsRegister()) {
        Move(dest.GetRegister(), src.GetRegister(), dest.GetSize());
        src_regs &= ~get_mask(src.GetRegister());  // Allow clobbering source register.
      } else if (ref != kInvalidReferenceOffset) {
        CreateJObject(
            dest.GetRegister(), ref, ManagedRegister::NoRegister(), /*null_allowed=*/ i != 0u);
      } else {
        Load(dest.GetRegister(), src.GetFrameOffset(), dest.GetSize());
      }
      dest_regs &= ~get_mask(dest.GetRegister());  // Destination register was filled.
    }
    CHECK_NE(old_dest_regs, dest_regs);
    DCHECK_EQ(0u, dest_regs & ~old_dest_regs);
  }
}

void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  DCHECK(!mdest.Equals(X86_64ManagedRegister::FromCpuRegister(GetScratchRegister().AsRegister())));
  X86_64ManagedRegister dest = mdest.AsX86_64();
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register
      // Use a 64-bit subtraction on RSP to match the 64-bit `addq` below.
      __ subq(CpuRegister(RSP), Immediate(16));
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(CpuRegister(RSP), 0));
        __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(CpuRegister(RSP), 0));
        __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      }
      __ addq(CpuRegister(RSP), Immediate(16));
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}


void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, size_t value) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  __ movq(dest.AsCpuRegister(), Immediate(value));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) {
  DCHECK(size == 4 || size == 8) << size;
  CpuRegister scratch = GetScratchRegister();
  if (size == 8) {
    __ movq(scratch, Address(CpuRegister(RSP), src));
    __ movq(Address(CpuRegister(RSP), dest), scratch);
  } else {
    __ movl(scratch, Address(CpuRegister(RSP), src));
    __ movl(Address(CpuRegister(RSP), dest), scratch);
  }
}

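// Creates a `jobject` in `mout_reg` for a spilled reference: the result is the address of
// the stack slot holding the reference, or null when `null_allowed` is set and the
// reference itself is null. `min_reg` may already hold the reference; otherwise it is
// reloaded from the stack slot.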
void X86_64JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
                                            FrameOffset spilled_reference_offset,
                                            ManagedRegister min_reg,
                                            bool null_allowed) {
  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
  if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
    // Use out_reg as indicator of null.
    in_reg = out_reg;
    // TODO: movzwl
    __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
  }
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
  }
}

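// Same as above, but writes the resulting `jobject` to a stack slot instead of a register,
// using the scratch register for the intermediate value.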
void X86_64JNIMacroAssembler::CreateJObject(FrameOffset out_off,
                                            FrameOffset spilled_reference_offset,
                                            bool null_allowed) {
  CpuRegister scratch = GetScratchRegister();
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
    __ testl(scratch, scratch);
    __ j(kZero, &null_arg);
    __ leaq(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
  }
  __ movq(Address(CpuRegister(RSP), out_off), scratch);
}

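// Decodes a `jobject` known to be null, a JNI transition reference or a local reference:
// global and weak global references are dispatched to `slow_path`, null skips the load,
// and otherwise the kind bits are masked off and the reference is loaded from the decoded
// address.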
void X86_64JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
                                                                JNIMacroLabel* slow_path,
                                                                JNIMacroLabel* resume) {
  constexpr uint64_t kGlobalOrWeakGlobalMask = IndirectReferenceTable::GetGlobalOrWeakGlobalMask();
  constexpr uint64_t kIndirectRefKindMask = IndirectReferenceTable::GetIndirectRefKindMask();
  // TODO: Add `testq()` with `imm32` to assembler to avoid using 64-bit pointer as 32-bit value.
  __ testl(reg.AsX86_64().AsCpuRegister(), Immediate(kGlobalOrWeakGlobalMask));
  __ j(kNotZero, X86_64JNIMacroLabel::Cast(slow_path)->AsX86_64());
  __ andq(reg.AsX86_64().AsCpuRegister(), Immediate(~kIndirectRefKindMask));
  __ j(kZero, X86_64JNIMacroLabel::Cast(resume)->AsX86_64());  // Skip load for null.
  __ movl(reg.AsX86_64().AsCpuRegister(), Address(reg.AsX86_64().AsCpuRegister(), /*disp=*/ 0));
}

void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  CHECK(base.IsCpuRegister());
  __ jmp(Address(base.AsCpuRegister(), offset.Int32Value()));
}

void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call
}

void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset) {
  __ gs()->call(Address::Absolute(offset, true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister dest) {
  __ gs()->movq(dest.AsX86_64().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset) {
  CpuRegister scratch = GetScratchRegister();
  __ gs()->movq(scratch, Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
  __ movq(Address(CpuRegister(RSP), offset), scratch);
}

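// Transitions the thread from Runnable to Native on the fast path: a release CAS on the
// thread flags from kRunnableStateValue (with no flags set) to kNativeStateValue. If the
// CAS fails because any flag is set, control branches to `label` for the slow path;
// otherwise the mutator lock is removed from the thread's held-mutexes table.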
void X86_64JNIMacroAssembler::TryToTransitionFromRunnableToNative(
    JNIMacroLabel* label, [[maybe_unused]] ArrayRef<const ManagedRegister> scratch_regs) {
  constexpr uint32_t kNativeStateValue = Thread::StoredThreadStateValue(ThreadState::kNative);
  constexpr uint32_t kRunnableStateValue = Thread::StoredThreadStateValue(ThreadState::kRunnable);
  constexpr ThreadOffset64 thread_flags_offset = Thread::ThreadFlagsOffset<kX86_64PointerSize>();
  constexpr ThreadOffset64 thread_held_mutex_mutator_lock_offset =
      Thread::HeldMutexOffset<kX86_64PointerSize>(kMutatorLock);

  CpuRegister rax(RAX);  // RAX can be freely clobbered. It does not hold any argument.
  CpuRegister scratch = GetScratchRegister();

  // CAS release, old_value = kRunnableStateValue, new_value = kNativeStateValue, no flags.
  static_assert(kRunnableStateValue == 0u);
  __ xorl(rax, rax);
  __ movl(scratch, Immediate(kNativeStateValue));
  __ gs()->LockCmpxchgl(Address::Absolute(thread_flags_offset.Uint32Value(), /*no_rip=*/ true),
                        scratch);
  // LOCK CMPXCHG has full barrier semantics, so we don't need barriers here.
  // If any flags are set, go to the slow path.
  __ j(kNotZero, X86_64JNIMacroLabel::Cast(label)->AsX86_64());

  // Clear `self->tlsPtr_.held_mutexes[kMutatorLock]`.
  __ gs()->movq(
      Address::Absolute(thread_held_mutex_mutator_lock_offset.Uint32Value(), /*no_rip=*/ true),
      Immediate(0));
}

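// Transitions the thread from Native back to Runnable on the fast path: an acquire CAS on
// the thread flags from kNativeStateValue to kRunnableStateValue, preserving RAX across the
// CAS when it holds the native call's return value. Failure (flags set or unexpected state)
// branches to `label`; success records the mutator lock in the held-mutexes table.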
void X86_64JNIMacroAssembler::TryToTransitionFromNativeToRunnable(
    JNIMacroLabel* label,
    ArrayRef<const ManagedRegister> scratch_regs,
    ManagedRegister return_reg) {
  constexpr uint32_t kNativeStateValue = Thread::StoredThreadStateValue(ThreadState::kNative);
  constexpr uint32_t kRunnableStateValue = Thread::StoredThreadStateValue(ThreadState::kRunnable);
  constexpr ThreadOffset64 thread_flags_offset = Thread::ThreadFlagsOffset<kX86_64PointerSize>();
  constexpr ThreadOffset64 thread_held_mutex_mutator_lock_offset =
      Thread::HeldMutexOffset<kX86_64PointerSize>(kMutatorLock);
  constexpr ThreadOffset64 thread_mutator_lock_offset =
      Thread::MutatorLockOffset<kX86_64PointerSize>();

  DCHECK_GE(scratch_regs.size(), 2u);
  DCHECK(!scratch_regs[0].AsX86_64().Overlaps(return_reg.AsX86_64()));
  CpuRegister scratch = scratch_regs[0].AsX86_64().AsCpuRegister();
  DCHECK(!scratch_regs[1].AsX86_64().Overlaps(return_reg.AsX86_64()));
  CpuRegister saved_rax = scratch_regs[1].AsX86_64().AsCpuRegister();
  CpuRegister rax(RAX);
  bool preserve_rax = return_reg.AsX86_64().Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX));

  // CAS acquire, old_value = kNativeStateValue, new_value = kRunnableStateValue, no flags.
  if (preserve_rax) {
    __ movq(saved_rax, rax);  // Save RAX.
  }
  __ movl(rax, Immediate(kNativeStateValue));
  static_assert(kRunnableStateValue == 0u);
  __ xorl(scratch, scratch);
  __ gs()->LockCmpxchgl(Address::Absolute(thread_flags_offset.Uint32Value(), /*no_rip=*/ true),
                        scratch);
  // LOCK CMPXCHG has full barrier semantics, so we don't need barriers here.
  if (preserve_rax) {
    __ movq(rax, saved_rax);  // Restore RAX; MOV does not change flags.
  }
  // If any flags are set, or the state is not Native, go to the slow path.
  // (While the thread can theoretically transition between different Suspended states,
  // it would be very unexpected to see a state other than Native at this point.)
  __ j(kNotZero, X86_64JNIMacroLabel::Cast(label)->AsX86_64());

  // Set `self->tlsPtr_.held_mutexes[kMutatorLock]` to the mutator lock.
  __ gs()->movq(scratch,
                Address::Absolute(thread_mutator_lock_offset.Uint32Value(), /*no_rip=*/ true));
  __ gs()->movq(
      Address::Absolute(thread_held_mutex_mutator_lock_offset.Uint32Value(), /*no_rip=*/ true),
      scratch);
}

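// Branches to `label` if any suspend or checkpoint request flag is set on the current thread.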
void X86_64JNIMacroAssembler::SuspendCheck(JNIMacroLabel* label) {
  __ gs()->testl(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>(), true),
                 Immediate(Thread::SuspendOrCheckpointRequestFlags()));
  __ j(kNotZero, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::ExceptionPoll(JNIMacroLabel* label) {
  __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
                Immediate(0));
  __ j(kNotEqual, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::DeliverPendingException() {
  // Pass exception as argument in RDI
  __ gs()->movq(CpuRegister(RDI),
                Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
  __ gs()->call(
      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
  // this call should never return
  __ int3();
}

std::unique_ptr<JNIMacroLabel> X86_64JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new (asm_.GetAllocator()) X86_64JNIMacroLabel());
}

void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

static Condition UnaryConditionToX86_64Condition(JNIMacroUnaryCondition cond) {
  switch (cond) {
    case JNIMacroUnaryCondition::kZero:
      return kZero;
    case JNIMacroUnaryCondition::kNotZero:
      return kNotZero;
  }
}

void X86_64JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) {
  CHECK(label != nullptr);

  // CMP self->tls32_.is_gc_marking, 0
  // Jcc <Offset>
  DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
  __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64PointerSize>(), true),
                Immediate(0));
  __ j(UnaryConditionToX86_64Condition(cond), X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

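// Tests the mark bit in the object's lock word and branches to `label` on the given
// condition. Only valid when the Baker read barrier is in use.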
void X86_64JNIMacroAssembler::TestMarkBit(ManagedRegister mref,
                                          JNIMacroLabel* label,
                                          JNIMacroUnaryCondition cond) {
  DCHECK(kUseBakerReadBarrier);
  CpuRegister ref = mref.AsX86_64().AsCpuRegister();
  static_assert(LockWord::kMarkBitStateSize == 1u);
  __ testl(Address(ref, mirror::Object::MonitorOffset().SizeValue()),
           Immediate(LockWord::kMarkBitStateMaskShifted));
  __ j(UnaryConditionToX86_64Condition(cond), X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) {
  CpuRegister scratch = GetScratchRegister();
  __ movq(scratch, Immediate(address));
  __ cmpb(Address(scratch, 0), Immediate(0));
  __ j(kNotZero, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

#undef __

}  // namespace x86_64
}  // namespace art