/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "context_x86.h"

#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
#include "base/memory_tool.h"
#include "quick/quick_method_frame_info.h"

namespace art {
namespace x86 {

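// Shared zero word. SmashCallerSaves() points EAX and EDX here so the landing
// site of a long jump sees a null/zero return value; SetGPR() and SetFPR()
// refuse to write through it.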
static constexpr uintptr_t gZero = 0;

void X86Context::Reset() {
  std::fill_n(gprs_, arraysize(gprs_), nullptr);
  std::fill_n(fprs_, arraysize(fprs_), nullptr);
  gprs_[ESP] = &esp_;
  gprs_[EAX] = &arg0_;
  // Initialize registers with easy to spot debug values.
  esp_ = X86Context::kBadGprBase + ESP;
  eip_ = X86Context::kBadGprBase + kNumberOfCpuRegisters;
  arg0_ = 0;
}

void X86Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
  int spill_pos = 0;

  // Core registers come first, from the highest down to the lowest.
  uint32_t core_regs =
      frame_info.CoreSpillMask() & ~(static_cast<uint32_t>(-1) << kNumberOfCpuRegisters);
  DCHECK_EQ(1, POPCOUNT(frame_info.CoreSpillMask() & ~core_regs));  // Return address spill.
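  // The single bit stripped above (at or above kNumberOfCpuRegisters) accounts
  // for the return-address spill; it is counted in the spill mask but never
  // restored through gprs_, which is why spill_pos only reaches
  // POPCOUNT(CoreSpillMask()) - 1 below.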
  for (uint32_t core_reg : HighToLowBits(core_regs)) {
    gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
    ++spill_pos;
  }
  DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) - 1);

  // FP registers come second, from the highest down to the lowest.
  uint32_t fp_regs = frame_info.FpSpillMask();
  DCHECK_EQ(0u, fp_regs & (static_cast<uint32_t>(-1) << kNumberOfFloatRegisters));
  for (uint32_t fp_reg : HighToLowBits(fp_regs)) {
    // Two void* per XMM register.
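    // fprs_[2 * n] and fprs_[2 * n + 1] alias the low and high 32-bit halves of
    // XMM n. Assuming CalleeSaveAddress() hands out slots from the top of the
    // frame downward, spill_pos + 1 is the lower address of the pair and so
    // holds the low half of the little-endian 64-bit spill.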
    fprs_[2 * fp_reg] = reinterpret_cast<uint32_t*>(
        CalleeSaveAddress(frame, spill_pos + 1, frame_info.FrameSizeInBytes()));
    fprs_[2 * fp_reg + 1] = reinterpret_cast<uint32_t*>(
        CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes()));
    spill_pos += 2;
  }
  DCHECK_EQ(spill_pos,
            POPCOUNT(frame_info.CoreSpillMask()) - 1 + 2 * POPCOUNT(frame_info.FpSpillMask()));
}

void X86Context::SmashCallerSaves() {
  // This needs to be 0 because we want a null/zero return value.
  gprs_[EAX] = const_cast<uintptr_t*>(&gZero);
  gprs_[EDX] = const_cast<uintptr_t*>(&gZero);
  gprs_[ECX] = nullptr;
  gprs_[EBX] = nullptr;
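  // nullptr entries are filled with a kBadGprBase sentinel in DoLongJump().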
  memset(&fprs_[0], '\0', sizeof(fprs_));
}

void X86Context::SetGPR(uint32_t reg, uintptr_t value) {
  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
  DCHECK(IsAccessibleGPR(reg));
  CHECK_NE(gprs_[reg], &gZero);
  *gprs_[reg] = value;
}

void X86Context::SetFPR(uint32_t reg, uintptr_t value) {
  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
  DCHECK(IsAccessibleFPR(reg));
  CHECK_NE(fprs_[reg], reinterpret_cast<const uint32_t*>(&gZero));
  *fprs_[reg] = value;
}

void X86Context::DoLongJump() {
#if defined(__i386__)
  // Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
  // the top for the stack pointer that doesn't get popped in a pop-all.
  volatile uintptr_t gprs[kNumberOfCpuRegisters + 1];
  for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
    gprs[kNumberOfCpuRegisters - i - 1] =
        gprs_[i] != nullptr ? *gprs_[i] : X86Context::kBadGprBase + i;
  }
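  // popal expects the saved registers lowest-address-first in the order
  // EDI, ESI, EBP, ESP (ignored), EBX, EDX, ECX, EAX, i.e. the reverse of the
  // register numbering, hence the reversed fill above.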
  uint32_t fprs[kNumberOfFloatRegisters];
  for (size_t i = 0; i < kNumberOfFloatRegisters; ++i) {
    fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : X86Context::kBadFprBase + i;
  }
  // We want to load the stack pointer one slot below so that the ret will pop eip.
  uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - sizeof(intptr_t);
  gprs[kNumberOfCpuRegisters] = esp;
  *(reinterpret_cast<uintptr_t*>(esp)) = eip_;
  MEMORY_TOOL_HANDLE_NO_RETURN;
  __asm__ __volatile__(
      "movl %1, %%ebx\n\t"          // Address base of FPRs.
      "movsd 0(%%ebx), %%xmm0\n\t"  // Load up XMM0-XMM7.
      "movsd 8(%%ebx), %%xmm1\n\t"
      "movsd 16(%%ebx), %%xmm2\n\t"
      "movsd 24(%%ebx), %%xmm3\n\t"
      "movsd 32(%%ebx), %%xmm4\n\t"
      "movsd 40(%%ebx), %%xmm5\n\t"
      "movsd 48(%%ebx), %%xmm6\n\t"
      "movsd 56(%%ebx), %%xmm7\n\t"
      "movl %0, %%esp\n\t"  // ESP points to gprs.
      "popal\n\t"           // Load all registers except ESP and EIP with values in gprs.
      "popl %%esp\n\t"      // Load stack pointer.
      "ret\n\t"             // From higher in the stack pop eip.
      :  // output.
      : "g"(&gprs[0]), "g"(&fprs[0])  // input.
      :);  // clobber.
#else
  UNIMPLEMENTED(FATAL);
#endif
  UNREACHABLE();
}

}  // namespace x86
}  // namespace art