/*
 * x86 memory access helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
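
/* The cpu_ld*_data()/cpu_st*_data() accessors used by the helpers below
 * come from the headers above: in softmmu builds they go through the
 * software MMU and can fault; in user-only builds they access guest
 * memory directly. */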

/* Broken thread support: the x86 LOCK prefix is emulated by serializing
   all locked instructions through a single global spinlock. */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
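
/* Illustrative sketch, not part of this file: the translator is assumed
 * to bracket a LOCK-prefixed instruction with these helpers through the
 * gen_helper_* wrappers generated from helper.h, roughly:
 *
 *     if (s->prefix & PREFIX_LOCK) {
 *         gen_helper_lock();
 *     }
 *     ... emit the read-modify-write operation ...
 *     if (s->prefix & PREFIX_LOCK) {
 *         gen_helper_unlock();
 *     }
 */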

/* CMPXCHG8B: compare EDX:EAX with the 64-bit value at a0.  On a match,
 * store ECX:EBX there and set ZF; on a mismatch, load the memory value
 * into EDX:EAX and clear ZF.  As on hardware, the store is performed in
 * both cases. */
void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(env, CC_OP);
    d = cpu_ldq_data(env, a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        cpu_stq_data(env, a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        cpu_stq_data(env, a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
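
/* Note: atomicity of the locked form relies on the translator wrapping
 * the call in helper_lock()/helper_unlock() above; the helper itself
 * uses plain loads and stores. */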

#ifdef TARGET_X86_64
/* CMPXCHG16B: the 128-bit analogue, comparing RDX:RAX with the value at
 * a0 and storing RCX:RBX on a match.  The operand must be 16-byte
 * aligned, otherwise #GP is raised.  As above, the store is performed
 * whether or not the comparison succeeds.  (Under TARGET_X86_64 the
 * EAX/EBX/ECX/EDX macros name the full 64-bit registers.) */
void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    eflags = helper_cc_compute_all(env, CC_OP);
    d0 = cpu_ldq_data(env, a0);
    d1 = cpu_ldq_data(env, a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        cpu_stq_data(env, a0, EBX);
        cpu_stq_data(env, a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        cpu_stq_data(env, a0, d0);
        cpu_stq_data(env, a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

/* BOUND: raise #BR (exception 5) if the signed index v lies outside the
 * [lower, upper] bounds pair stored at a0; boundw checks 16-bit bounds,
 * boundl 32-bit ones. */
void helper_boundw(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldsw_data(env, a0);
    high = cpu_ldsw_data(env, a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

void helper_boundl(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldl_data(env, a0);
    high = cpu_ldl_data(env, a0 + 4);
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
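
/* Instantiate the softmmu load/store helpers once per access size:
 * SHIFT selects an access of 1 << SHIFT bytes (0 = byte, 1 = word,
 * 2 = long, 3 = quad). */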

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* Try to fill the TLB and raise an exception on failure.  If retaddr is
   NULL, the call came from C code (i.e. not from generated code or from
   helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUX86State *env, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(env, retaddr);
        }
        raise_exception_err(env, env->exception_index, env->error_code);
    }
}
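
/* Illustrative sketch, assuming the softmmu_template.h slow path of this
 * QEMU generation: on a TLB miss the generated helper calls, e.g.
 *
 *     tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
 *
 * where retaddr points back into the generated code, so the faulting
 * instruction can be restarted precisely after the fault is handled. */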
#endif