1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "context_x86.h"
18
19 #include "art_method-inl.h"
20 #include "base/bit_utils.h"
21 #include "quick/quick_method_frame_info.h"
22
23 namespace art {
24 namespace x86 {
25
26 static constexpr uintptr_t gZero = 0;
27
Reset()28 void X86Context::Reset() {
29 std::fill_n(gprs_, arraysize(gprs_), nullptr);
30 std::fill_n(fprs_, arraysize(fprs_), nullptr);
31 gprs_[ESP] = &esp_;
32 // Initialize registers with easy to spot debug values.
33 esp_ = X86Context::kBadGprBase + ESP;
34 eip_ = X86Context::kBadGprBase + kNumberOfCpuRegisters;
35 }
36
// Points each callee-saved register slot in this context at the address where
// the visited frame spilled that register, so a later DoLongJump() restores
// the callee saves of the method `fr` currently points at.
// Spill slots are assigned from `spill_pos` 0 upward, walking registers from
// highest number to lowest, matching the quick-frame spill layout.
void X86Context::FillCalleeSaves(const StackVisitor& fr) {
  ArtMethod* method = fr.GetMethod();
  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
  int spill_pos = 0;

  // Core registers come first, from the highest down to the lowest.
  // Mask off bits beyond the CPU register file; the one remaining extra bit
  // in CoreSpillMask() is the return-address spill, checked below.
  uint32_t core_regs =
      frame_info.CoreSpillMask() & ~(static_cast<uint32_t>(-1) << kNumberOfCpuRegisters);
  DCHECK_EQ(1, POPCOUNT(frame_info.CoreSpillMask() & ~core_regs));  // Return address spill.
  for (uint32_t core_reg : HighToLowBits(core_regs)) {
    gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
    ++spill_pos;
  }
  // All core spills except the return address must now be consumed.
  DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) - 1);

  // FP registers come second, from the highest down to the lowest.
  uint32_t fp_regs = frame_info.FpSpillMask();
  DCHECK_EQ(0u, fp_regs & (static_cast<uint32_t>(-1) << kNumberOfFloatRegisters));
  for (uint32_t fp_reg : HighToLowBits(fp_regs)) {
    // Two void* per XMM register: each 64-bit XMM spill occupies two 32-bit
    // slots, with the high half at the lower spill position (hence the +1
    // for the low half, which fprs_ stores first).
    fprs_[2 * fp_reg] = reinterpret_cast<uint32_t*>(
        fr.CalleeSaveAddress(spill_pos + 1, frame_info.FrameSizeInBytes()));
    fprs_[2 * fp_reg + 1] = reinterpret_cast<uint32_t*>(
        fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes()));
    spill_pos += 2;
  }
  // Every spill slot (core minus return address, plus two per XMM) is consumed.
  DCHECK_EQ(spill_pos,
            POPCOUNT(frame_info.CoreSpillMask()) - 1 + 2 * POPCOUNT(frame_info.FpSpillMask()));
}
66
SmashCallerSaves()67 void X86Context::SmashCallerSaves() {
68 // This needs to be 0 because we want a null/zero return value.
69 gprs_[EAX] = const_cast<uintptr_t*>(&gZero);
70 gprs_[EDX] = const_cast<uintptr_t*>(&gZero);
71 gprs_[ECX] = nullptr;
72 gprs_[EBX] = nullptr;
73 memset(&fprs_[0], '\0', sizeof(fprs_));
74 }
75
SetGPR(uint32_t reg,uintptr_t value)76 void X86Context::SetGPR(uint32_t reg, uintptr_t value) {
77 CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
78 DCHECK(IsAccessibleGPR(reg));
79 CHECK_NE(gprs_[reg], &gZero);
80 *gprs_[reg] = value;
81 }
82
SetFPR(uint32_t reg,uintptr_t value)83 void X86Context::SetFPR(uint32_t reg, uintptr_t value) {
84 CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
85 DCHECK(IsAccessibleFPR(reg));
86 CHECK_NE(fprs_[reg], reinterpret_cast<const uint32_t*>(&gZero));
87 *fprs_[reg] = value;
88 }
89
// Restores every register tracked by this context and transfers control to
// eip_. Never returns. Registers whose context pointer is null get the
// kBadGprBase/kBadFprBase poison values instead, so a use of a non-restored
// register is easy to spot in a debugger.
void X86Context::DoLongJump() {
#if defined(__i386__)
  // Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
  // the top for the stack pointer that doesn't get popped in a pop-all.
  // `volatile` keeps the stores to this stack image from being elided or
  // reordered around the asm below.
  volatile uintptr_t gprs[kNumberOfCpuRegisters + 1];
  for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
    // Reverse order: popal pops EDI..EAX, i.e. register number ascending from
    // the lowest address, so slot 0 holds the highest-numbered register.
    gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != nullptr ? *gprs_[i] : X86Context::kBadGprBase + i;
  }
  // Flattened 32-bit halves of the XMM registers, in fprs_ slot order.
  uint32_t fprs[kNumberOfFloatRegisters];
  for (size_t i = 0; i < kNumberOfFloatRegisters; ++i) {
    fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : X86Context::kBadFprBase + i;
  }
  // We want to load the stack pointer one slot below so that the ret will pop eip.
  uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - sizeof(intptr_t);
  gprs[kNumberOfCpuRegisters] = esp;
  // Plant the target eip_ where `ret` will find it on the restored stack.
  *(reinterpret_cast<uintptr_t*>(esp)) = eip_;
  __asm__ __volatile__(
      "movl %1, %%ebx\n\t"          // Address base of FPRs.
      "movsd 0(%%ebx), %%xmm0\n\t"  // Load up XMM0-XMM7.
      "movsd 8(%%ebx), %%xmm1\n\t"
      "movsd 16(%%ebx), %%xmm2\n\t"
      "movsd 24(%%ebx), %%xmm3\n\t"
      "movsd 32(%%ebx), %%xmm4\n\t"
      "movsd 40(%%ebx), %%xmm5\n\t"
      "movsd 48(%%ebx), %%xmm6\n\t"
      "movsd 56(%%ebx), %%xmm7\n\t"
      "movl %0, %%esp\n\t"          // ESP points to gprs.
      "popal\n\t"                   // Load all registers except ESP and EIP with values in gprs.
      "popl %%esp\n\t"              // Load stack pointer.
      "ret\n\t"                     // From higher in the stack pop eip.
      :  // output.
      : "g"(&gprs[0]), "g"(&fprs[0])  // input.
      :);  // clobber.
      // NOTE(review): the clobber list is empty because control never returns
      // from this asm — nothing after it can observe clobbered state.
#else
  UNIMPLEMENTED(FATAL);
#endif
  UNREACHABLE();
}
128
129 } // namespace x86
130 } // namespace art
131