1 //===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Created by Greg Clayton on 6/25/07.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #if defined (__i386__) || defined (__x86_64__)
15
16 #include <sys/cdefs.h>
17 #include <sys/types.h>
18 #include <sys/sysctl.h>
19
20 #include "MacOSX/x86_64/DNBArchImplX86_64.h"
21 #include "DNBLog.h"
22 #include "MachThread.h"
23 #include "MachProcess.h"
24 #include <mach/mach.h>
25 #include <stdlib.h>
#include <limits.h> // ULLONG_MAX
#include <string.h> // strstr()
26
27 #if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
28 enum debugState {
29 debugStateUnknown,
30 debugStateOff,
31 debugStateOn
32 };
33
34 static debugState sFPUDebugState = debugStateUnknown;
35 static debugState sAVXForceState = debugStateUnknown;
36
37 static bool DebugFPURegs ()
38 {
39 if (sFPUDebugState == debugStateUnknown)
40 {
41 if (getenv("DNB_DEBUG_FPU_REGS"))
42 sFPUDebugState = debugStateOn;
43 else
44 sFPUDebugState = debugStateOff;
45 }
46
47 return (sFPUDebugState == debugStateOn);
48 }
49
50 static bool ForceAVXRegs ()
51 {
52 if (sAVXForceState == debugStateUnknown)
53 {
54 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
55 sAVXForceState = debugStateOn;
56 else
57 sAVXForceState = debugStateOff;
58 }
59
60 return (sAVXForceState == debugStateOn);
61 }
62
63 #define DEBUG_FPU_REGS (DebugFPURegs())
64 #define FORCE_AVX_REGS (ForceAVXRegs())
65 #else
66 #define DEBUG_FPU_REGS (0)
67 #define FORCE_AVX_REGS (0)
68 #endif
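// Usage sketch (debug/release debugserver builds only, not a documented
// interface): exporting either environment variable checked above, e.g.
//   DNB_DEBUG_FPU_REGS=1 and/or DNB_DEBUG_X86_FORCE_AVX_REGS=1
// in the environment that launches debugserver flips DEBUG_FPU_REGS /
// FORCE_AVX_REGS to true. With DEBUG_FPU_REGS set, GetFPUState() below fills
// the FPU context with recognizable test patterns instead of calling
// thread_get_state(), and FORCE_AVX_REGS selects the AVX register layout even
// when CPUHasAVX() reports false.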
69
70
71 extern "C" bool
72 CPUHasAVX()
73 {
74 enum AVXPresence
75 {
76 eAVXUnknown = -1,
77 eAVXNotPresent = 0,
78 eAVXPresent = 1
79 };
80
81 static AVXPresence g_has_avx = eAVXUnknown;
82 if (g_has_avx == eAVXUnknown)
83 {
84 g_has_avx = eAVXNotPresent;
85
86 // Only xnu-2020 or later has AVX support; any version before
87 // that has a busted thread_get_state RPC that truncates the
88 // thread state buffer (<rdar://problem/10122874>). So we need to
89 // verify the kernel version number manually or disable AVX support.
90 int mib[2];
91 char buffer[1024];
92 size_t length = sizeof(buffer);
93 uint64_t xnu_version = 0;
94 mib[0] = CTL_KERN;
95 mib[1] = KERN_VERSION;
96 int err = ::sysctl(mib, 2, &buffer, &length, NULL, 0);
97 if (err == 0)
98 {
99 const char *xnu = strstr (buffer, "xnu-");
100 if (xnu)
101 {
102 const char *xnu_version_cstr = xnu + 4;
103 xnu_version = strtoull (xnu_version_cstr, NULL, 0);
104 if (xnu_version >= 2020 && xnu_version != ULLONG_MAX)
105 {
106 if (::HasAVX())
107 {
108 g_has_avx = eAVXPresent;
109 }
110 }
111 }
112 }
113 DNBLogThreadedIf (LOG_THREAD, "CPUHasAVX(): g_has_avx = %i (err = %i, errno = %i, xnu_version = %llu)", g_has_avx, err, errno, xnu_version);
114 }
115
116 return (g_has_avx == eAVXPresent);
117 }
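// For illustration (hypothetical kern.version string, not taken from a real
// system): a value such as
//   "Darwin Kernel Version 12.0.0: ...: root:xnu-2050.7.9~1/RELEASE_X86_64"
// makes strstr() find "xnu-" and strtoull() parse 2050, which is >= 2020, so
// g_has_avx becomes eAVXPresent provided HasAVX() also reports hardware
// support.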
118
119 uint64_t
120 DNBArchImplX86_64::GetPC(uint64_t failValue)
121 {
122 // Get program counter
123 if (GetGPRState(false) == KERN_SUCCESS)
124 return m_state.context.gpr.__rip;
125 return failValue;
126 }
127
128 kern_return_t
129 DNBArchImplX86_64::SetPC(uint64_t value)
130 {
131 // Get program counter
132 kern_return_t err = GetGPRState(false);
133 if (err == KERN_SUCCESS)
134 {
135 m_state.context.gpr.__rip = value;
136 err = SetGPRState();
137 }
138 return err;
139 }
140
141 uint64_t
142 DNBArchImplX86_64::GetSP(uint64_t failValue)
143 {
144 // Get stack pointer
145 if (GetGPRState(false) == KERN_SUCCESS)
146 return m_state.context.gpr.__rsp;
147 return failValue;
148 }
149
150 // Uncomment the value below to verify the values in the debugger.
151 //#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
152
153 kern_return_t
154 DNBArchImplX86_64::GetGPRState(bool force)
155 {
156 if (force || m_state.GetError(e_regSetGPR, Read))
157 {
158 #if DEBUG_GPR_VALUES
159 m_state.context.gpr.__rax = ('a' << 8) + 'x';
160 m_state.context.gpr.__rbx = ('b' << 8) + 'x';
161 m_state.context.gpr.__rcx = ('c' << 8) + 'x';
162 m_state.context.gpr.__rdx = ('d' << 8) + 'x';
163 m_state.context.gpr.__rdi = ('d' << 8) + 'i';
164 m_state.context.gpr.__rsi = ('s' << 8) + 'i';
165 m_state.context.gpr.__rbp = ('b' << 8) + 'p';
166 m_state.context.gpr.__rsp = ('s' << 8) + 'p';
167 m_state.context.gpr.__r8 = ('r' << 8) + '8';
168 m_state.context.gpr.__r9 = ('r' << 8) + '9';
169 m_state.context.gpr.__r10 = ('r' << 8) + 'a';
170 m_state.context.gpr.__r11 = ('r' << 8) + 'b';
171 m_state.context.gpr.__r12 = ('r' << 8) + 'c';
172 m_state.context.gpr.__r13 = ('r' << 8) + 'd';
173 m_state.context.gpr.__r14 = ('r' << 8) + 'e';
174 m_state.context.gpr.__r15 = ('r' << 8) + 'f';
175 m_state.context.gpr.__rip = ('i' << 8) + 'p';
176 m_state.context.gpr.__rflags = ('f' << 8) + 'l';
177 m_state.context.gpr.__cs = ('c' << 8) + 's';
178 m_state.context.gpr.__fs = ('f' << 8) + 's';
179 m_state.context.gpr.__gs = ('g' << 8) + 's';
180 m_state.SetError(e_regSetGPR, Read, 0);
181 #else
182 mach_msg_type_number_t count = e_regSetWordSizeGPR;
183 m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
184 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
185 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
186 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
187 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
188 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
189 "\n\trip = %16.16llx"
190 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx",
191 m_thread->MachPortNumber(), x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT,
192 m_state.GetError(e_regSetGPR, Read),
193 m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
194 m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
195 m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
196 m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
197 m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
198 m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
199 m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs);
200
201 // DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
202 // "\n\trax = %16.16llx"
203 // "\n\trbx = %16.16llx"
204 // "\n\trcx = %16.16llx"
205 // "\n\trdx = %16.16llx"
206 // "\n\trdi = %16.16llx"
207 // "\n\trsi = %16.16llx"
208 // "\n\trbp = %16.16llx"
209 // "\n\trsp = %16.16llx"
210 // "\n\t r8 = %16.16llx"
211 // "\n\t r9 = %16.16llx"
212 // "\n\tr10 = %16.16llx"
213 // "\n\tr11 = %16.16llx"
214 // "\n\tr12 = %16.16llx"
215 // "\n\tr13 = %16.16llx"
216 // "\n\tr14 = %16.16llx"
217 // "\n\tr15 = %16.16llx"
218 // "\n\trip = %16.16llx"
219 // "\n\tflg = %16.16llx"
220 // "\n\t cs = %16.16llx"
221 // "\n\t fs = %16.16llx"
222 // "\n\t gs = %16.16llx",
223 // m_thread->MachPortNumber(),
224 // x86_THREAD_STATE64,
225 // x86_THREAD_STATE64_COUNT,
226 // m_state.GetError(e_regSetGPR, Read),
227 // m_state.context.gpr.__rax,
228 // m_state.context.gpr.__rbx,
229 // m_state.context.gpr.__rcx,
230 // m_state.context.gpr.__rdx,
231 // m_state.context.gpr.__rdi,
232 // m_state.context.gpr.__rsi,
233 // m_state.context.gpr.__rbp,
234 // m_state.context.gpr.__rsp,
235 // m_state.context.gpr.__r8,
236 // m_state.context.gpr.__r9,
237 // m_state.context.gpr.__r10,
238 // m_state.context.gpr.__r11,
239 // m_state.context.gpr.__r12,
240 // m_state.context.gpr.__r13,
241 // m_state.context.gpr.__r14,
242 // m_state.context.gpr.__r15,
243 // m_state.context.gpr.__rip,
244 // m_state.context.gpr.__rflags,
245 // m_state.context.gpr.__cs,
246 // m_state.context.gpr.__fs,
247 // m_state.context.gpr.__gs);
248 #endif
249 }
250 return m_state.GetError(e_regSetGPR, Read);
251 }
252
253 // Uncomment the value below to verify the values in the debugger.
254 //#define DEBUG_FPU_REGS 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
255
256 kern_return_t
257 DNBArchImplX86_64::GetFPUState(bool force)
258 {
259 if (force || m_state.GetError(e_regSetFPU, Read))
260 {
261 if (DEBUG_FPU_REGS) {
262 if (CPUHasAVX() || FORCE_AVX_REGS)
263 {
264 m_state.context.fpu.avx.__fpu_reserved[0] = -1;
265 m_state.context.fpu.avx.__fpu_reserved[1] = -1;
266 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
267 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
268 m_state.context.fpu.avx.__fpu_ftw = 1;
269 m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
270 m_state.context.fpu.avx.__fpu_fop = 2;
271 m_state.context.fpu.avx.__fpu_ip = 3;
272 m_state.context.fpu.avx.__fpu_cs = 4;
273 m_state.context.fpu.avx.__fpu_rsrv2 = 5;
274 m_state.context.fpu.avx.__fpu_dp = 6;
275 m_state.context.fpu.avx.__fpu_ds = 7;
276 m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
277 m_state.context.fpu.avx.__fpu_mxcsr = 8;
278 m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
279 int i;
280 for (i=0; i<16; ++i)
281 {
282 if (i<10)
283 {
284 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
285 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
286 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
287 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
288 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
289 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
290 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
291 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
292 }
293 else
294 {
295 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
296 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
297 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
298 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
299 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
300 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
301 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
302 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
303 }
304
305 m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
306 m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
307 m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
308 m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
309 m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
310 m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
311 m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
312 m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
313 m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8';
314 m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9';
315 m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A';
316 m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B';
317 m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C';
318 m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D';
319 m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E';
320 m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F';
321
322 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
323 m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
324 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
325 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
326 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
327 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
328 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
329 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
330 m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8';
331 m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9';
332 m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A';
333 m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B';
334 m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C';
335 m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D';
336 m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E';
337 m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F';
338 }
339 for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
340 m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
341 m_state.context.fpu.avx.__fpu_reserved1 = -1;
342 for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
343 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
344 m_state.SetError(e_regSetFPU, Read, 0);
345 }
346 else
347 {
348 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
349 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
350 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
351 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
352 m_state.context.fpu.no_avx.__fpu_ftw = 1;
353 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
354 m_state.context.fpu.no_avx.__fpu_fop = 2;
355 m_state.context.fpu.no_avx.__fpu_ip = 3;
356 m_state.context.fpu.no_avx.__fpu_cs = 4;
357 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
358 m_state.context.fpu.no_avx.__fpu_dp = 6;
359 m_state.context.fpu.no_avx.__fpu_ds = 7;
360 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
361 m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
362 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
363 int i;
364 for (i=0; i<16; ++i)
365 {
366 if (i<10)
367 {
368 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
369 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
370 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
371 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
372 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
373 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
374 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
375 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
376 }
377 else
378 {
379 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
380 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
381 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
382 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
383 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
384 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
385 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
386 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
387 }
388
389 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
390 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
391 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
392 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
393 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
394 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
395 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
396 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
397 m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
398 m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
399 m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
400 m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
401 m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
402 m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
403 m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
404 m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
405 }
406 for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
407 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
408 m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
409 m_state.SetError(e_regSetFPU, Read, 0);
410 }
411 }
412 else
413 {
414 if (CPUHasAVX() || FORCE_AVX_REGS)
415 {
416 mach_msg_type_number_t count = e_regSetWordSizeAVX;
417 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
418 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in)) => 0x%8.8x",
419 m_thread->MachPortNumber(), __x86_64_AVX_STATE, (uint32_t)count,
420 e_regSetWordSizeAVX, m_state.GetError(e_regSetFPU, Read));
421 }
422 else
423 {
424 mach_msg_type_number_t count = e_regSetWordSizeFPU;
425 m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
426 DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in)) => 0x%8.8x",
427 m_thread->MachPortNumber(), __x86_64_FLOAT_STATE, (uint32_t)count,
428 e_regSetWordSizeFPU, m_state.GetError(e_regSetFPU, Read));
429 }
430 }
431 }
432 return m_state.GetError(e_regSetFPU, Read);
433 }
434
435 kern_return_t
436 DNBArchImplX86_64::GetEXCState(bool force)
437 {
438 if (force || m_state.GetError(e_regSetEXC, Read))
439 {
440 mach_msg_type_number_t count = e_regSetWordSizeEXC;
441 m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
442 }
443 return m_state.GetError(e_regSetEXC, Read);
444 }
445
446 kern_return_t
447 DNBArchImplX86_64::SetGPRState()
448 {
449 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
450 DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
451
452 m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
453 DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
454 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
455 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
456 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
457 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
458 "\n\trip = %16.16llx"
459 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx",
460 m_thread->MachPortNumber(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
461 m_state.GetError(e_regSetGPR, Write),
462 m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
463 m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
464 m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
465 m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
466 m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
467 m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
468 m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs);
469 return m_state.GetError(e_regSetGPR, Write);
470 }
471
472 kern_return_t
473 DNBArchImplX86_64::SetFPUState()
474 {
475 if (DEBUG_FPU_REGS)
476 {
477 m_state.SetError(e_regSetFPU, Write, 0);
478 return m_state.GetError(e_regSetFPU, Write);
479 }
480 else
481 {
482 if (CPUHasAVX() || FORCE_AVX_REGS)
483 {
484 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
485 return m_state.GetError(e_regSetFPU, Write);
486 }
487 else
488 {
489 m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU));
490 return m_state.GetError(e_regSetFPU, Write);
491 }
492 }
493 }
494
495 kern_return_t
496 DNBArchImplX86_64::SetEXCState()
497 {
498 m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
499 return m_state.GetError(e_regSetEXC, Write);
500 }
501
502 kern_return_t
503 DNBArchImplX86_64::GetDBGState(bool force)
504 {
505 if (force || m_state.GetError(e_regSetDBG, Read))
506 {
507 mach_msg_type_number_t count = e_regSetWordSizeDBG;
508 m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
509 }
510 return m_state.GetError(e_regSetDBG, Read);
511 }
512
513 kern_return_t
514 DNBArchImplX86_64::SetDBGState(bool also_set_on_task)
515 {
516 m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
517 if (also_set_on_task)
518 {
519 kern_return_t kret = ::task_set_state(m_thread->Process()->Task().TaskPort(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG);
520 if (kret != KERN_SUCCESS)
521 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::SetDBGState failed to set debug control register state: 0x%8.8x.", kret);
522 }
523 return m_state.GetError(e_regSetDBG, Write);
524 }
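// Note: when also_set_on_task is true, the task_set_state() call above
// additionally registers the debug state with the task itself; the intent is
// that threads created in this task after this point start out with the same
// hardware watchpoints (see task_set_state() in the Mach APIs).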
525
526 void
527 DNBArchImplX86_64::ThreadWillResume()
528 {
529 // Do we need to step this thread? If so, let the mach thread tell us so.
530 if (m_thread->IsStepping())
531 {
532 // This is the primary thread, let the arch do anything it needs
533 EnableHardwareSingleStep(true);
534 }
535
536 // Reset the debug status register, if necessary, before we resume.
537 kern_return_t kret = GetDBGState(false);
538 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
539 if (kret != KERN_SUCCESS)
540 return;
541
542 DBG &debug_state = m_state.context.dbg;
543 bool need_reset = false;
544 uint32_t i, num = NumSupportedHardwareWatchpoints();
545 for (i = 0; i < num; ++i)
546 if (IsWatchpointHit(debug_state, i))
547 need_reset = true;
548
549 if (need_reset)
550 {
551 ClearWatchpointHits(debug_state);
552 kret = SetDBGState(false);
553 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
554 }
555 }
556
557 bool
558 DNBArchImplX86_64::ThreadDidStop()
559 {
560 bool success = true;
561
562 m_state.InvalidateAllRegisterStates();
563
564 // Are we stepping a single instruction?
565 if (GetGPRState(true) == KERN_SUCCESS)
566 {
567 // We are single stepping, was this the primary thread?
568 if (m_thread->IsStepping())
569 {
570 // This was the primary thread, we need to clear the trace
571 // bit if so.
572 success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
573 }
574 else
575 {
576 // The MachThread will automatically restore the suspend count
577 // in ThreadDidStop(), so we don't need to do anything here if
578 // we weren't the primary thread the last time
579 }
580 }
581 return success;
582 }
583
584 bool
585 DNBArchImplX86_64::NotifyException(MachException::Data& exc)
586 {
587 switch (exc.exc_type)
588 {
589 case EXC_BAD_ACCESS:
590 break;
591 case EXC_BAD_INSTRUCTION:
592 break;
593 case EXC_ARITHMETIC:
594 break;
595 case EXC_EMULATION:
596 break;
597 case EXC_SOFTWARE:
598 break;
599 case EXC_BREAKPOINT:
600 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
601 {
602 // exc_code = EXC_I386_BPT
603 //
604 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
605 if (pc != INVALID_NUB_ADDRESS && pc > 0)
606 {
607 pc -= 1;
608 // Check for a breakpoint at one byte prior to the current PC value
609 // since the PC will be just past the trap.
610
611 DNBBreakpoint *bp = m_thread->Process()->Breakpoints().FindByAddress(pc);
612 if (bp)
613 {
614 // Back up the PC since the trap was taken and the PC
615 // is at the address following the single-byte trap instruction.
616 if (m_state.context.gpr.__rip > 0)
617 {
618 m_state.context.gpr.__rip = pc;
619 // Write the new PC back out
620 SetGPRState ();
621 }
622 }
623 return true;
624 }
625 }
626 else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1)
627 {
628 // exc_code = EXC_I386_SGL
629 //
630 // Check whether this corresponds to a watchpoint hit event.
631 // If yes, set the exc_sub_code to the data break address.
632 nub_addr_t addr = 0;
633 uint32_t hw_index = GetHardwareWatchpointHit(addr);
634 if (hw_index != INVALID_NUB_HW_INDEX)
635 {
636 exc.exc_data[1] = addr;
637 // Piggyback the hw_index in the exc.data.
638 exc.exc_data.push_back(hw_index);
639 }
640
641 return true;
642 }
643 break;
644 case EXC_SYSCALL:
645 break;
646 case EXC_MACH_SYSCALL:
647 break;
648 case EXC_RPC_ALERT:
649 break;
650 }
651 return false;
652 }
653
654 uint32_t
655 DNBArchImplX86_64::NumSupportedHardwareWatchpoints()
656 {
657 // Available debug address registers: dr0, dr1, dr2, dr3.
658 return 4;
659 }
660
661 static uint32_t
662 size_and_rw_bits(nub_size_t size, bool read, bool write)
663 {
664 uint32_t rw = 0;
665 if (read) {
666 rw = 0x3; // READ or READ/WRITE
667 } else if (write) {
668 rw = 0x1; // WRITE
669 } else {
670 assert(0 && "read and write cannot both be false");
671 }
672
673 switch (size) {
674 case 1:
675 return rw;
676 case 2:
677 return (0x1 << 2) | rw;
678 case 4:
679 return (0x3 << 2) | rw;
680 case 8:
681 return (0x2 << 2) | rw;
682 default:
683 assert(0 && "invalid size, must be one of 1, 2, 4, or 8"); return 0;
684 }
685 }
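// Worked example of the encoding above: a 4-byte write-only watchpoint yields
//   size_and_rw_bits(4, false, true) == (0x3 << 2) | 0x1 == 0xd
// and an 8-byte read-or-write watchpoint yields
//   size_and_rw_bits(8, true, false) == (0x2 << 2) | 0x3 == 0xb.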
686 void
687 DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
688 {
689 // Set both dr7 (debug control register) and dri (debug address register).
690
691 // dr7{7-0} encodes the local/global enable bits:
692 // global enable --. .-- local enable
693 // | |
694 // v v
695 // dr0 -> bits{1-0}
696 // dr1 -> bits{3-2}
697 // dr2 -> bits{5-4}
698 // dr3 -> bits{7-6}
699 //
700 // dr7{31-16} encodes the rw/len bits:
701 // b_x+3, b_x+2, b_x+1, b_x
702 // where bits{x+1, x} => rw
703 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
704 // and bits{x+3, x+2} => len
705 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
706 //
707 // dr0 -> bits{19-16}
708 // dr1 -> bits{23-20}
709 // dr2 -> bits{27-24}
710 // dr3 -> bits{31-28}
711 debug_state.__dr7 |= (1 << (2*hw_index) |
712 size_and_rw_bits(size, read, write) << (16+4*hw_index));
713 switch (hw_index) {
714 case 0:
715 debug_state.__dr0 = addr; break;
716 case 1:
717 debug_state.__dr1 = addr; break;
718 case 2:
719 debug_state.__dr2 = addr; break;
720 case 3:
721 debug_state.__dr3 = addr; break;
722 default:
723 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
724 }
725 return;
726 }
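// Worked example (values chosen for illustration only):
//   SetWatchpoint(dbg, /*hw_index*/ 1, 0x1000, /*size*/ 4, /*read*/ false, /*write*/ true)
// stores 0x1000 in __dr1 and ORs (1 << 2) | (0xd << 20) into __dr7: the
// local-enable bit for slot 1 in dr7 bits {3-2}, plus the 4-byte/write rw-len
// nibble 0b1101 in dr7 bits {23-20}, matching the layout documented above.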
727
728 void
729 DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
730 {
731 debug_state.__dr7 &= ~(3 << (2*hw_index));
732 switch (hw_index) {
733 case 0:
734 debug_state.__dr0 = 0; break;
735 case 1:
736 debug_state.__dr1 = 0; break;
737 case 2:
738 debug_state.__dr2 = 0; break;
739 case 3:
740 debug_state.__dr3 = 0; break;
741 default:
742 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
743 }
744 return;
745 }
746
747 bool
748 DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
749 {
750 // Check dr7 (debug control register) for local/global enable bits:
751 // global enable --. .-- local enable
752 // | |
753 // v v
754 // dr0 -> bits{1-0}
755 // dr1 -> bits{3-2}
756 // dr2 -> bits{5-4}
757 // dr3 -> bits{7-6}
758 return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
759 }
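// Example: if __dr7 == 0x4 (only the slot-1 enable bit from the example
// above), IsWatchpointVacant(dbg, 1) is false while slots 0, 2 and 3 report
// vacant.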
760
761 // Resets the local copy of the debug status register to wait for the next debug exception.
762 void
763 DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state)
764 {
765 // See also IsWatchpointHit().
766 debug_state.__dr6 = 0;
767 return;
768 }
769
770 bool
771 DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
772 {
773 // Check dr6 (debug status register) whether a watchpoint hits:
774 // is watchpoint hit?
775 // |
776 // v
777 // dr0 -> bits{0}
778 // dr1 -> bits{1}
779 // dr2 -> bits{2}
780 // dr3 -> bits{3}
781 return (debug_state.__dr6 & (1 << hw_index));
782 }
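// Example: if the CPU reports a data breakpoint on slot 2, it sets bit 2 of
// dr6 (__dr6 & 0x4 != 0), so IsWatchpointHit(dbg, 2) is true and
// GetHardwareWatchpointHit() further below returns 2 along with __dr2's
// address.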
783
784 nub_addr_t
785 DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
786 {
787 switch (hw_index) {
788 case 0:
789 return debug_state.__dr0;
790 case 1:
791 return debug_state.__dr1;
792 case 2:
793 return debug_state.__dr2;
794 case 3:
795 return debug_state.__dr3;
796 default:
797 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); return 0;
798 }
799 }
800
801 bool
802 DNBArchImplX86_64::StartTransForHWP()
803 {
804 if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back)
805 DNBLogError ("%s inconsistent state detected, expected %d or %d, got: %d", __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state);
806 m_2pc_dbg_checkpoint = m_state.context.dbg;
807 m_2pc_trans_state = Trans_Pending;
808 return true;
809 }
810 bool
811 DNBArchImplX86_64::RollbackTransForHWP()
812 {
813 m_state.context.dbg = m_2pc_dbg_checkpoint;
814 if (m_2pc_trans_state != Trans_Pending)
815 DNBLogError ("%s inconsistent state detected, expected %d, got: %d", __FUNCTION__, Trans_Pending, m_2pc_trans_state);
816 m_2pc_trans_state = Trans_Rolled_Back;
817 kern_return_t kret = SetDBGState(false);
818 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret);
819
820 if (kret == KERN_SUCCESS)
821 return true;
822 else
823 return false;
824 }
825 bool
826 DNBArchImplX86_64::FinishTransForHWP()
827 {
828 m_2pc_trans_state = Trans_Done;
829 return true;
830 }
831 DNBArchImplX86_64::DBG
832 DNBArchImplX86_64::GetDBGCheckpoint()
833 {
834 return m_2pc_dbg_checkpoint;
835 }
836
837 uint32_t
838 DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task)
839 {
840 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = 0x%llx, size = %llu, read = %u, write = %u)", (uint64_t)addr, (uint64_t)size, read, write);
841
842 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
843
844 // Can only watch 1, 2, 4, or 8 bytes.
845 if (!(size == 1 || size == 2 || size == 4 || size == 8))
846 return INVALID_NUB_HW_INDEX;
847
848 // We must watch for either read or write
849 if (read == false && write == false)
850 return INVALID_NUB_HW_INDEX;
851
852 // Read the debug state
853 kern_return_t kret = GetDBGState(false);
854
855 if (kret == KERN_SUCCESS)
856 {
857 // Check to make sure we have the needed hardware support
858 uint32_t i = 0;
859
860 DBG &debug_state = m_state.context.dbg;
861 for (i = 0; i < num_hw_watchpoints; ++i)
862 {
863 if (IsWatchpointVacant(debug_state, i))
864 break;
865 }
866
867 // See if we found an available hw breakpoint slot above
868 if (i < num_hw_watchpoints)
869 {
870 StartTransForHWP();
871
872 // Modify our local copy of the debug state, first.
873 SetWatchpoint(debug_state, i, addr, size, read, write);
874 // Now set the watch point in the inferior.
875 kret = SetDBGState(also_set_on_task);
876 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
877
878 if (kret == KERN_SUCCESS)
879 return i;
880 else // Revert to the previous debug state voluntarily. The transaction coordinator knows that we have failed.
881 m_state.context.dbg = GetDBGCheckpoint();
882 }
883 else
884 {
885 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
886 }
887 }
888 return INVALID_NUB_HW_INDEX;
889 }
890
891 bool
892 DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index, bool also_set_on_task)
893 {
894 kern_return_t kret = GetDBGState(false);
895
896 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
897 if (kret == KERN_SUCCESS)
898 {
899 DBG &debug_state = m_state.context.dbg;
900 if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
901 {
902 StartTransForHWP();
903
904 // Modify our local copy of the debug state, first.
905 ClearWatchpoint(debug_state, hw_index);
906 // Now disable the watch point in the inferior.
907 kret = SetDBGState(also_set_on_task);
908 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
909 hw_index);
910
911 if (kret == KERN_SUCCESS)
912 return true;
913 else // Revert to the previous debug state voluntarily. The transaction coordinator knows that we have failed.
914 m_state.context.dbg = GetDBGCheckpoint();
915 }
916 }
917 return false;
918 }
919
920 // Iterate through the debug status register; return the index of the first hit.
921 uint32_t
922 DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr)
923 {
924 // Read the debug state
925 kern_return_t kret = GetDBGState(true);
926 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
927 if (kret == KERN_SUCCESS)
928 {
929 DBG &debug_state = m_state.context.dbg;
930 uint32_t i, num = NumSupportedHardwareWatchpoints();
931 for (i = 0; i < num; ++i)
932 {
933 if (IsWatchpointHit(debug_state, i))
934 {
935 addr = GetWatchAddress(debug_state, i);
936 DNBLogThreadedIf(LOG_WATCHPOINTS,
937 "DNBArchImplX86_64::GetHardwareWatchpointHit() found => %u (addr = 0x%llx).",
938 i,
939 (uint64_t)addr);
940 return i;
941 }
942 }
943 }
944 return INVALID_NUB_HW_INDEX;
945 }
946
947 // Set the single step bit in the processor status register.
948 kern_return_t
949 DNBArchImplX86_64::EnableHardwareSingleStep (bool enable)
950 {
951 if (GetGPRState(false) == KERN_SUCCESS)
952 {
953 const uint32_t trace_bit = 0x100u;
954 if (enable)
955 m_state.context.gpr.__rflags |= trace_bit;
956 else
957 m_state.context.gpr.__rflags &= ~trace_bit;
958 return SetGPRState();
959 }
960 return m_state.GetError(e_regSetGPR, Read);
961 }
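// Note: trace_bit (0x100) is the TF (trap) flag, bit 8 of RFLAGS. With TF set
// the CPU raises a single-step trap after the next instruction, which arrives
// here as EXC_BREAKPOINT with code EXC_I386_SGL and is handled in
// NotifyException() above.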
962
963
964 //----------------------------------------------------------------------
965 // Register information definitions
966 //----------------------------------------------------------------------
967
968 enum
969 {
970 gpr_rax = 0,
971 gpr_rbx,
972 gpr_rcx,
973 gpr_rdx,
974 gpr_rdi,
975 gpr_rsi,
976 gpr_rbp,
977 gpr_rsp,
978 gpr_r8,
979 gpr_r9,
980 gpr_r10,
981 gpr_r11,
982 gpr_r12,
983 gpr_r13,
984 gpr_r14,
985 gpr_r15,
986 gpr_rip,
987 gpr_rflags,
988 gpr_cs,
989 gpr_fs,
990 gpr_gs,
991 gpr_eax,
992 gpr_ebx,
993 gpr_ecx,
994 gpr_edx,
995 gpr_edi,
996 gpr_esi,
997 gpr_ebp,
998 gpr_esp,
999 gpr_r8d, // Low 32 bits of r8
1000 gpr_r9d, // Low 32 bits of r9
1001 gpr_r10d, // Low 32 bits of r10
1002 gpr_r11d, // Low 32 bits of r11
1003 gpr_r12d, // Low 32 bits of r12
1004 gpr_r13d, // Low 32 bits of r13
1005 gpr_r14d, // Low 32 bits of r14
1006 gpr_r15d, // Low 32 bits of r15
1007 gpr_ax ,
1008 gpr_bx ,
1009 gpr_cx ,
1010 gpr_dx ,
1011 gpr_di ,
1012 gpr_si ,
1013 gpr_bp ,
1014 gpr_sp ,
1015 gpr_r8w, // Low 16 bits of r8
1016 gpr_r9w, // Low 16 bits of r9
1017 gpr_r10w, // Low 16 bits of r10
1018 gpr_r11w, // Low 16 bits of r11
1019 gpr_r12w, // Low 16 bits of r12
1020 gpr_r13w, // Low 16 bits of r13
1021 gpr_r14w, // Low 16 bits of r14
1022 gpr_r15w, // Low 16 bits of r15
1023 gpr_ah ,
1024 gpr_bh ,
1025 gpr_ch ,
1026 gpr_dh ,
1027 gpr_al ,
1028 gpr_bl ,
1029 gpr_cl ,
1030 gpr_dl ,
1031 gpr_dil,
1032 gpr_sil,
1033 gpr_bpl,
1034 gpr_spl,
1035 gpr_r8l, // Low 8 bits of r8
1036 gpr_r9l, // Low 8 bits of r9
1037 gpr_r10l, // Low 8 bits of r10
1038 gpr_r11l, // Low 8 bits of r11
1039 gpr_r12l, // Low 8 bits of r12
1040 gpr_r13l, // Low 8 bits of r13
1041 gpr_r14l, // Low 8 bits of r14
1042 gpr_r15l, // Low 8 bits of r15
1043 k_num_gpr_regs
1044 };
1045
1046 enum {
1047 fpu_fcw,
1048 fpu_fsw,
1049 fpu_ftw,
1050 fpu_fop,
1051 fpu_ip,
1052 fpu_cs,
1053 fpu_dp,
1054 fpu_ds,
1055 fpu_mxcsr,
1056 fpu_mxcsrmask,
1057 fpu_stmm0,
1058 fpu_stmm1,
1059 fpu_stmm2,
1060 fpu_stmm3,
1061 fpu_stmm4,
1062 fpu_stmm5,
1063 fpu_stmm6,
1064 fpu_stmm7,
1065 fpu_xmm0,
1066 fpu_xmm1,
1067 fpu_xmm2,
1068 fpu_xmm3,
1069 fpu_xmm4,
1070 fpu_xmm5,
1071 fpu_xmm6,
1072 fpu_xmm7,
1073 fpu_xmm8,
1074 fpu_xmm9,
1075 fpu_xmm10,
1076 fpu_xmm11,
1077 fpu_xmm12,
1078 fpu_xmm13,
1079 fpu_xmm14,
1080 fpu_xmm15,
1081 fpu_ymm0,
1082 fpu_ymm1,
1083 fpu_ymm2,
1084 fpu_ymm3,
1085 fpu_ymm4,
1086 fpu_ymm5,
1087 fpu_ymm6,
1088 fpu_ymm7,
1089 fpu_ymm8,
1090 fpu_ymm9,
1091 fpu_ymm10,
1092 fpu_ymm11,
1093 fpu_ymm12,
1094 fpu_ymm13,
1095 fpu_ymm14,
1096 fpu_ymm15,
1097 k_num_fpu_regs,
1098
1099 // Aliases
1100 fpu_fctrl = fpu_fcw,
1101 fpu_fstat = fpu_fsw,
1102 fpu_ftag = fpu_ftw,
1103 fpu_fiseg = fpu_cs,
1104 fpu_fioff = fpu_ip,
1105 fpu_foseg = fpu_ds,
1106 fpu_fooff = fpu_dp
1107 };
1108
1109 enum {
1110 exc_trapno,
1111 exc_err,
1112 exc_faultvaddr,
1113 k_num_exc_regs,
1114 };
1115
1116
1117 enum gcc_dwarf_regnums
1118 {
1119 gcc_dwarf_rax = 0,
1120 gcc_dwarf_rdx = 1,
1121 gcc_dwarf_rcx = 2,
1122 gcc_dwarf_rbx = 3,
1123 gcc_dwarf_rsi = 4,
1124 gcc_dwarf_rdi = 5,
1125 gcc_dwarf_rbp = 6,
1126 gcc_dwarf_rsp = 7,
1127 gcc_dwarf_r8,
1128 gcc_dwarf_r9,
1129 gcc_dwarf_r10,
1130 gcc_dwarf_r11,
1131 gcc_dwarf_r12,
1132 gcc_dwarf_r13,
1133 gcc_dwarf_r14,
1134 gcc_dwarf_r15,
1135 gcc_dwarf_rip,
1136 gcc_dwarf_xmm0,
1137 gcc_dwarf_xmm1,
1138 gcc_dwarf_xmm2,
1139 gcc_dwarf_xmm3,
1140 gcc_dwarf_xmm4,
1141 gcc_dwarf_xmm5,
1142 gcc_dwarf_xmm6,
1143 gcc_dwarf_xmm7,
1144 gcc_dwarf_xmm8,
1145 gcc_dwarf_xmm9,
1146 gcc_dwarf_xmm10,
1147 gcc_dwarf_xmm11,
1148 gcc_dwarf_xmm12,
1149 gcc_dwarf_xmm13,
1150 gcc_dwarf_xmm14,
1151 gcc_dwarf_xmm15,
1152 gcc_dwarf_stmm0,
1153 gcc_dwarf_stmm1,
1154 gcc_dwarf_stmm2,
1155 gcc_dwarf_stmm3,
1156 gcc_dwarf_stmm4,
1157 gcc_dwarf_stmm5,
1158 gcc_dwarf_stmm6,
1159 gcc_dwarf_stmm7,
1160 gcc_dwarf_ymm0 = gcc_dwarf_xmm0,
1161 gcc_dwarf_ymm1 = gcc_dwarf_xmm1,
1162 gcc_dwarf_ymm2 = gcc_dwarf_xmm2,
1163 gcc_dwarf_ymm3 = gcc_dwarf_xmm3,
1164 gcc_dwarf_ymm4 = gcc_dwarf_xmm4,
1165 gcc_dwarf_ymm5 = gcc_dwarf_xmm5,
1166 gcc_dwarf_ymm6 = gcc_dwarf_xmm6,
1167 gcc_dwarf_ymm7 = gcc_dwarf_xmm7,
1168 gcc_dwarf_ymm8 = gcc_dwarf_xmm8,
1169 gcc_dwarf_ymm9 = gcc_dwarf_xmm9,
1170 gcc_dwarf_ymm10 = gcc_dwarf_xmm10,
1171 gcc_dwarf_ymm11 = gcc_dwarf_xmm11,
1172 gcc_dwarf_ymm12 = gcc_dwarf_xmm12,
1173 gcc_dwarf_ymm13 = gcc_dwarf_xmm13,
1174 gcc_dwarf_ymm14 = gcc_dwarf_xmm14,
1175 gcc_dwarf_ymm15 = gcc_dwarf_xmm15
1176 };
1177
1178 enum gdb_regnums
1179 {
1180 gdb_rax = 0,
1181 gdb_rbx = 1,
1182 gdb_rcx = 2,
1183 gdb_rdx = 3,
1184 gdb_rsi = 4,
1185 gdb_rdi = 5,
1186 gdb_rbp = 6,
1187 gdb_rsp = 7,
1188 gdb_r8 = 8,
1189 gdb_r9 = 9,
1190 gdb_r10 = 10,
1191 gdb_r11 = 11,
1192 gdb_r12 = 12,
1193 gdb_r13 = 13,
1194 gdb_r14 = 14,
1195 gdb_r15 = 15,
1196 gdb_rip = 16,
1197 gdb_rflags = 17,
1198 gdb_cs = 18,
1199 gdb_ss = 19,
1200 gdb_ds = 20,
1201 gdb_es = 21,
1202 gdb_fs = 22,
1203 gdb_gs = 23,
1204 gdb_stmm0 = 24,
1205 gdb_stmm1 = 25,
1206 gdb_stmm2 = 26,
1207 gdb_stmm3 = 27,
1208 gdb_stmm4 = 28,
1209 gdb_stmm5 = 29,
1210 gdb_stmm6 = 30,
1211 gdb_stmm7 = 31,
1212 gdb_fctrl = 32, gdb_fcw = gdb_fctrl,
1213 gdb_fstat = 33, gdb_fsw = gdb_fstat,
1214 gdb_ftag = 34, gdb_ftw = gdb_ftag,
1215 gdb_fiseg = 35, gdb_fpu_cs = gdb_fiseg,
1216 gdb_fioff = 36, gdb_ip = gdb_fioff,
1217 gdb_foseg = 37, gdb_fpu_ds = gdb_foseg,
1218 gdb_fooff = 38, gdb_dp = gdb_fooff,
1219 gdb_fop = 39,
1220 gdb_xmm0 = 40,
1221 gdb_xmm1 = 41,
1222 gdb_xmm2 = 42,
1223 gdb_xmm3 = 43,
1224 gdb_xmm4 = 44,
1225 gdb_xmm5 = 45,
1226 gdb_xmm6 = 46,
1227 gdb_xmm7 = 47,
1228 gdb_xmm8 = 48,
1229 gdb_xmm9 = 49,
1230 gdb_xmm10 = 50,
1231 gdb_xmm11 = 51,
1232 gdb_xmm12 = 52,
1233 gdb_xmm13 = 53,
1234 gdb_xmm14 = 54,
1235 gdb_xmm15 = 55,
1236 gdb_mxcsr = 56,
1237 gdb_ymm0 = gdb_xmm0,
1238 gdb_ymm1 = gdb_xmm1,
1239 gdb_ymm2 = gdb_xmm2,
1240 gdb_ymm3 = gdb_xmm3,
1241 gdb_ymm4 = gdb_xmm4,
1242 gdb_ymm5 = gdb_xmm5,
1243 gdb_ymm6 = gdb_xmm6,
1244 gdb_ymm7 = gdb_xmm7,
1245 gdb_ymm8 = gdb_xmm8,
1246 gdb_ymm9 = gdb_xmm9,
1247 gdb_ymm10 = gdb_xmm10,
1248 gdb_ymm11 = gdb_xmm11,
1249 gdb_ymm12 = gdb_xmm12,
1250 gdb_ymm13 = gdb_xmm13,
1251 gdb_ymm14 = gdb_xmm14,
1252 gdb_ymm15 = gdb_xmm15
1253 };
1254
1255 #define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
1256 #define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
1257 #define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
1258 #define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg) + offsetof (DNBArchImplX86_64::Context, exc))
1259
1260 // This does not accurately identify the location of ymm0...7 in
1261 // Context.fpu.avx. That is because there is a bunch of padding
1262 // in Context.fpu.avx that we don't need. Offset macros lay out
1263 // the register state that Debugserver transmits to the debugger
1264 // -- not to interpret the thread_get_state info.
1265 #define AVX_OFFSET_YMM(n) (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))
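// For example, AVX_OFFSET_YMM(0) expands to AVX_OFFSET(xmm7) + 16 and
// AVX_OFFSET_YMM(1) to 32 bytes beyond that, so the ymm registers are laid
// out back to back at 32-byte strides in the register block that debugserver
// transmits (per the caveat above, this is not their location inside
// Context.fpu.avx).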
1266
1267 #define GPR_SIZE(reg) (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
1268 #define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
1269 #define FPU_SIZE_MMST(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
1270 #define FPU_SIZE_XMM(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
1271 #define FPU_SIZE_YMM(reg) (32)
1272 #define EXC_SIZE(reg) (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
1273
1274 // These macros will auto define the register name, alt name, register size,
1275 // register offset, encoding, format and native register. This ensures that
1276 // the register state structures are defined correctly and have the correct
1277 // sizes and offsets.
1278 #define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, INVALID_NUB_REGNUM, gdb_##reg, NULL, g_invalidate_##reg }
1279 #define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg, NULL, g_invalidate_##reg }
1280 #define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gdb_##reg, NULL, NULL }
1281 #define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, gdb_##reg, NULL, NULL }
1282 #define DEFINE_GPR_ALT4(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), gcc_dwarf_##reg, gcc_dwarf_##reg, gen, gdb_##reg, NULL, NULL }
1283
1284 #define DEFINE_GPR_PSEUDO_32(reg32,reg64) { e_regSetGPR, gpr_##reg32, #reg32, NULL, Uint, Hex, 4, GPR_OFFSET(reg64) ,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 }
1285 #define DEFINE_GPR_PSEUDO_16(reg16,reg64) { e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, GPR_OFFSET(reg64) ,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 }
1286 #define DEFINE_GPR_PSEUDO_8H(reg8,reg64) { e_regSetGPR, gpr_##reg8 , #reg8 , NULL, Uint, Hex, 1, GPR_OFFSET(reg64)+1,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 }
1287 #define DEFINE_GPR_PSEUDO_8L(reg8,reg64) { e_regSetGPR, gpr_##reg8 , #reg8 , NULL, Uint, Hex, 1, GPR_OFFSET(reg64) ,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 }
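// For example, DEFINE_GPR_PSEUDO_32(eax, rax) describes "eax" as a 4-byte
// value at GPR_OFFSET(rax) (the low 32 bits of rax on this little-endian
// target), contained in rax (g_contained_rax below) and invalidated together
// with rax/eax/ax/ah/al (g_invalidate_rax below), while
// DEFINE_GPR_PSEUDO_8H(ah, rax) points one byte past GPR_OFFSET(rax), i.e.
// bits 15-8 of rax.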
1288
1289 // General purpose registers for 64 bit
1290
1291 uint32_t g_contained_rax[] = { gpr_rax, INVALID_NUB_REGNUM };
1292 uint32_t g_contained_rbx[] = { gpr_rbx, INVALID_NUB_REGNUM };
1293 uint32_t g_contained_rcx[] = { gpr_rcx, INVALID_NUB_REGNUM };
1294 uint32_t g_contained_rdx[] = { gpr_rdx, INVALID_NUB_REGNUM };
1295 uint32_t g_contained_rdi[] = { gpr_rdi, INVALID_NUB_REGNUM };
1296 uint32_t g_contained_rsi[] = { gpr_rsi, INVALID_NUB_REGNUM };
1297 uint32_t g_contained_rbp[] = { gpr_rbp, INVALID_NUB_REGNUM };
1298 uint32_t g_contained_rsp[] = { gpr_rsp, INVALID_NUB_REGNUM };
1299 uint32_t g_contained_r8[] = { gpr_r8 , INVALID_NUB_REGNUM };
1300 uint32_t g_contained_r9[] = { gpr_r9 , INVALID_NUB_REGNUM };
1301 uint32_t g_contained_r10[] = { gpr_r10, INVALID_NUB_REGNUM };
1302 uint32_t g_contained_r11[] = { gpr_r11, INVALID_NUB_REGNUM };
1303 uint32_t g_contained_r12[] = { gpr_r12, INVALID_NUB_REGNUM };
1304 uint32_t g_contained_r13[] = { gpr_r13, INVALID_NUB_REGNUM };
1305 uint32_t g_contained_r14[] = { gpr_r14, INVALID_NUB_REGNUM };
1306 uint32_t g_contained_r15[] = { gpr_r15, INVALID_NUB_REGNUM };
1307
1308 uint32_t g_invalidate_rax[] = { gpr_rax, gpr_eax , gpr_ax , gpr_ah , gpr_al, INVALID_NUB_REGNUM };
1309 uint32_t g_invalidate_rbx[] = { gpr_rbx, gpr_ebx , gpr_bx , gpr_bh , gpr_bl, INVALID_NUB_REGNUM };
1310 uint32_t g_invalidate_rcx[] = { gpr_rcx, gpr_ecx , gpr_cx , gpr_ch , gpr_cl, INVALID_NUB_REGNUM };
1311 uint32_t g_invalidate_rdx[] = { gpr_rdx, gpr_edx , gpr_dx , gpr_dh , gpr_dl, INVALID_NUB_REGNUM };
1312 uint32_t g_invalidate_rdi[] = { gpr_rdi, gpr_edi , gpr_di , gpr_dil , INVALID_NUB_REGNUM };
1313 uint32_t g_invalidate_rsi[] = { gpr_rsi, gpr_esi , gpr_si , gpr_sil , INVALID_NUB_REGNUM };
1314 uint32_t g_invalidate_rbp[] = { gpr_rbp, gpr_ebp , gpr_bp , gpr_bpl , INVALID_NUB_REGNUM };
1315 uint32_t g_invalidate_rsp[] = { gpr_rsp, gpr_esp , gpr_sp , gpr_spl , INVALID_NUB_REGNUM };
1316 uint32_t g_invalidate_r8 [] = { gpr_r8 , gpr_r8d , gpr_r8w , gpr_r8l , INVALID_NUB_REGNUM };
1317 uint32_t g_invalidate_r9 [] = { gpr_r9 , gpr_r9d , gpr_r9w , gpr_r9l , INVALID_NUB_REGNUM };
1318 uint32_t g_invalidate_r10[] = { gpr_r10, gpr_r10d, gpr_r10w, gpr_r10l, INVALID_NUB_REGNUM };
1319 uint32_t g_invalidate_r11[] = { gpr_r11, gpr_r11d, gpr_r11w, gpr_r11l, INVALID_NUB_REGNUM };
1320 uint32_t g_invalidate_r12[] = { gpr_r12, gpr_r12d, gpr_r12w, gpr_r12l, INVALID_NUB_REGNUM };
1321 uint32_t g_invalidate_r13[] = { gpr_r13, gpr_r13d, gpr_r13w, gpr_r13l, INVALID_NUB_REGNUM };
1322 uint32_t g_invalidate_r14[] = { gpr_r14, gpr_r14d, gpr_r14w, gpr_r14l, INVALID_NUB_REGNUM };
1323 uint32_t g_invalidate_r15[] = { gpr_r15, gpr_r15d, gpr_r15w, gpr_r15l, INVALID_NUB_REGNUM };
1324
1325 const DNBRegisterInfo
1326 DNBArchImplX86_64::g_gpr_registers[] =
1327 {
1328 DEFINE_GPR (rax),
1329 DEFINE_GPR (rbx),
1330 DEFINE_GPR_ALT (rcx , "arg4", GENERIC_REGNUM_ARG4),
1331 DEFINE_GPR_ALT (rdx , "arg3", GENERIC_REGNUM_ARG3),
1332 DEFINE_GPR_ALT (rdi , "arg1", GENERIC_REGNUM_ARG1),
1333 DEFINE_GPR_ALT (rsi , "arg2", GENERIC_REGNUM_ARG2),
1334 DEFINE_GPR_ALT (rbp , "fp" , GENERIC_REGNUM_FP),
1335 DEFINE_GPR_ALT (rsp , "sp" , GENERIC_REGNUM_SP),
1336 DEFINE_GPR_ALT (r8 , "arg5", GENERIC_REGNUM_ARG5),
1337 DEFINE_GPR_ALT (r9 , "arg6", GENERIC_REGNUM_ARG6),
1338 DEFINE_GPR (r10),
1339 DEFINE_GPR (r11),
1340 DEFINE_GPR (r12),
1341 DEFINE_GPR (r13),
1342 DEFINE_GPR (r14),
1343 DEFINE_GPR (r15),
1344 DEFINE_GPR_ALT4 (rip , "pc", GENERIC_REGNUM_PC),
1345 DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
1346 DEFINE_GPR_ALT2 (cs, NULL),
1347 DEFINE_GPR_ALT2 (fs, NULL),
1348 DEFINE_GPR_ALT2 (gs, NULL),
1349 DEFINE_GPR_PSEUDO_32 (eax, rax),
1350 DEFINE_GPR_PSEUDO_32 (ebx, rbx),
1351 DEFINE_GPR_PSEUDO_32 (ecx, rcx),
1352 DEFINE_GPR_PSEUDO_32 (edx, rdx),
1353 DEFINE_GPR_PSEUDO_32 (edi, rdi),
1354 DEFINE_GPR_PSEUDO_32 (esi, rsi),
1355 DEFINE_GPR_PSEUDO_32 (ebp, rbp),
1356 DEFINE_GPR_PSEUDO_32 (esp, rsp),
1357 DEFINE_GPR_PSEUDO_32 (r8d, r8),
1358 DEFINE_GPR_PSEUDO_32 (r9d, r9),
1359 DEFINE_GPR_PSEUDO_32 (r10d, r10),
1360 DEFINE_GPR_PSEUDO_32 (r11d, r11),
1361 DEFINE_GPR_PSEUDO_32 (r12d, r12),
1362 DEFINE_GPR_PSEUDO_32 (r13d, r13),
1363 DEFINE_GPR_PSEUDO_32 (r14d, r14),
1364 DEFINE_GPR_PSEUDO_32 (r15d, r15),
1365 DEFINE_GPR_PSEUDO_16 (ax , rax),
1366 DEFINE_GPR_PSEUDO_16 (bx , rbx),
1367 DEFINE_GPR_PSEUDO_16 (cx , rcx),
1368 DEFINE_GPR_PSEUDO_16 (dx , rdx),
1369 DEFINE_GPR_PSEUDO_16 (di , rdi),
1370 DEFINE_GPR_PSEUDO_16 (si , rsi),
1371 DEFINE_GPR_PSEUDO_16 (bp , rbp),
1372 DEFINE_GPR_PSEUDO_16 (sp , rsp),
1373 DEFINE_GPR_PSEUDO_16 (r8w, r8),
1374 DEFINE_GPR_PSEUDO_16 (r9w, r9),
1375 DEFINE_GPR_PSEUDO_16 (r10w, r10),
1376 DEFINE_GPR_PSEUDO_16 (r11w, r11),
1377 DEFINE_GPR_PSEUDO_16 (r12w, r12),
1378 DEFINE_GPR_PSEUDO_16 (r13w, r13),
1379 DEFINE_GPR_PSEUDO_16 (r14w, r14),
1380 DEFINE_GPR_PSEUDO_16 (r15w, r15),
1381 DEFINE_GPR_PSEUDO_8H (ah , rax),
1382 DEFINE_GPR_PSEUDO_8H (bh , rbx),
1383 DEFINE_GPR_PSEUDO_8H (ch , rcx),
1384 DEFINE_GPR_PSEUDO_8H (dh , rdx),
1385 DEFINE_GPR_PSEUDO_8L (al , rax),
1386 DEFINE_GPR_PSEUDO_8L (bl , rbx),
1387 DEFINE_GPR_PSEUDO_8L (cl , rcx),
1388 DEFINE_GPR_PSEUDO_8L (dl , rdx),
1389 DEFINE_GPR_PSEUDO_8L (dil, rdi),
1390 DEFINE_GPR_PSEUDO_8L (sil, rsi),
1391 DEFINE_GPR_PSEUDO_8L (bpl, rbp),
1392 DEFINE_GPR_PSEUDO_8L (spl, rsp),
1393 DEFINE_GPR_PSEUDO_8L (r8l, r8),
1394 DEFINE_GPR_PSEUDO_8L (r9l, r9),
1395 DEFINE_GPR_PSEUDO_8L (r10l, r10),
1396 DEFINE_GPR_PSEUDO_8L (r11l, r11),
1397 DEFINE_GPR_PSEUDO_8L (r12l, r12),
1398 DEFINE_GPR_PSEUDO_8L (r13l, r13),
1399 DEFINE_GPR_PSEUDO_8L (r14l, r14),
1400 DEFINE_GPR_PSEUDO_8L (r15l, r15)
1401 };
1402
1403 // Floating point registers 64 bit
1404 const DNBRegisterInfo
1405 DNBArchImplX86_64::g_fpu_registers_no_avx[] =
1406 {
1407 { e_regSetFPU, fpu_fcw , "fctrl" , NULL, Uint, Hex, FPU_SIZE_UINT(fcw) , FPU_OFFSET(fcw) , -1U, -1U, -1U, -1U, NULL, NULL },
1408 { e_regSetFPU, fpu_fsw , "fstat" , NULL, Uint, Hex, FPU_SIZE_UINT(fsw) , FPU_OFFSET(fsw) , -1U, -1U, -1U, -1U, NULL, NULL },
1409 { e_regSetFPU, fpu_ftw , "ftag" , NULL, Uint, Hex, FPU_SIZE_UINT(ftw) , FPU_OFFSET(ftw) , -1U, -1U, -1U, -1U, NULL, NULL },
1410 { e_regSetFPU, fpu_fop , "fop" , NULL, Uint, Hex, FPU_SIZE_UINT(fop) , FPU_OFFSET(fop) , -1U, -1U, -1U, -1U, NULL, NULL },
1411 { e_regSetFPU, fpu_ip , "fioff" , NULL, Uint, Hex, FPU_SIZE_UINT(ip) , FPU_OFFSET(ip) , -1U, -1U, -1U, -1U, NULL, NULL },
1412 { e_regSetFPU, fpu_cs , "fiseg" , NULL, Uint, Hex, FPU_SIZE_UINT(cs) , FPU_OFFSET(cs) , -1U, -1U, -1U, -1U, NULL, NULL },
1413 { e_regSetFPU, fpu_dp , "fooff" , NULL, Uint, Hex, FPU_SIZE_UINT(dp) , FPU_OFFSET(dp) , -1U, -1U, -1U, -1U, NULL, NULL },
1414 { e_regSetFPU, fpu_ds , "foseg" , NULL, Uint, Hex, FPU_SIZE_UINT(ds) , FPU_OFFSET(ds) , -1U, -1U, -1U, -1U, NULL, NULL },
1415 { e_regSetFPU, fpu_mxcsr , "mxcsr" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr) , FPU_OFFSET(mxcsr) , -1U, -1U, -1U, -1U, NULL, NULL },
1416 { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U, NULL, NULL },
1417
1418 { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1U, gdb_stmm0, NULL, NULL },
1419 { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1U, gdb_stmm1, NULL, NULL },
1420 { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1U, gdb_stmm2, NULL, NULL },
1421 { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1U, gdb_stmm3, NULL, NULL },
1422 { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1U, gdb_stmm4, NULL, NULL },
1423 { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1U, gdb_stmm5, NULL, NULL },
1424 { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1U, gdb_stmm6, NULL, NULL },
1425 { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1U, gdb_stmm7, NULL, NULL },
1426
1427 { e_regSetFPU, fpu_xmm0 , "xmm0" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0) , FPU_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1U, gdb_xmm0 , NULL, NULL },
1428 { e_regSetFPU, fpu_xmm1 , "xmm1" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1) , FPU_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1U, gdb_xmm1 , NULL, NULL },
1429 { e_regSetFPU, fpu_xmm2 , "xmm2" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2) , FPU_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1U, gdb_xmm2 , NULL, NULL },
1430 { e_regSetFPU, fpu_xmm3 , "xmm3" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3) , FPU_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1U, gdb_xmm3 , NULL, NULL },
1431 { e_regSetFPU, fpu_xmm4 , "xmm4" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4) , FPU_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1U, gdb_xmm4 , NULL, NULL },
1432 { e_regSetFPU, fpu_xmm5 , "xmm5" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5) , FPU_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1U, gdb_xmm5 , NULL, NULL },
1433 { e_regSetFPU, fpu_xmm6 , "xmm6" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6) , FPU_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1U, gdb_xmm6 , NULL, NULL },
1434 { e_regSetFPU, fpu_xmm7 , "xmm7" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7) , FPU_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1U, gdb_xmm7 , NULL, NULL },
1435 { e_regSetFPU, fpu_xmm8 , "xmm8" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8) , FPU_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1U, gdb_xmm8 , NULL, NULL },
1436 { e_regSetFPU, fpu_xmm9 , "xmm9" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9) , FPU_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1U, gdb_xmm9 , NULL, NULL },
1437 { e_regSetFPU, fpu_xmm10, "xmm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10) , FPU_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1U, gdb_xmm10, NULL, NULL },
1438 { e_regSetFPU, fpu_xmm11, "xmm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11) , FPU_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1U, gdb_xmm11, NULL, NULL },
1439 { e_regSetFPU, fpu_xmm12, "xmm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12) , FPU_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1U, gdb_xmm12, NULL, NULL },
1440 { e_regSetFPU, fpu_xmm13, "xmm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13) , FPU_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1U, gdb_xmm13, NULL, NULL },
1441 { e_regSetFPU, fpu_xmm14, "xmm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14) , FPU_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1U, gdb_xmm14, NULL, NULL },
1442 { e_regSetFPU, fpu_xmm15, "xmm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15) , FPU_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1U, gdb_xmm15, NULL, NULL },
1443 };

const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_avx[] =
{
    { e_regSetFPU, fpu_fcw , "fctrl" , NULL, Uint, Hex, FPU_SIZE_UINT(fcw) , AVX_OFFSET(fcw) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_fsw , "fstat" , NULL, Uint, Hex, FPU_SIZE_UINT(fsw) , AVX_OFFSET(fsw) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_ftw , "ftag" , NULL, Uint, Hex, FPU_SIZE_UINT(ftw) , AVX_OFFSET(ftw) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_fop , "fop" , NULL, Uint, Hex, FPU_SIZE_UINT(fop) , AVX_OFFSET(fop) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_ip , "fioff" , NULL, Uint, Hex, FPU_SIZE_UINT(ip) , AVX_OFFSET(ip) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_cs , "fiseg" , NULL, Uint, Hex, FPU_SIZE_UINT(cs) , AVX_OFFSET(cs) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_dp , "fooff" , NULL, Uint, Hex, FPU_SIZE_UINT(dp) , AVX_OFFSET(dp) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_ds , "foseg" , NULL, Uint, Hex, FPU_SIZE_UINT(ds) , AVX_OFFSET(ds) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_mxcsr , "mxcsr" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr) , AVX_OFFSET(mxcsr) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U, NULL, NULL },

    { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), gcc_dwarf_stmm0, gcc_dwarf_stmm0, -1U, gdb_stmm0, NULL, NULL },
    { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), gcc_dwarf_stmm1, gcc_dwarf_stmm1, -1U, gdb_stmm1, NULL, NULL },
    { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), gcc_dwarf_stmm2, gcc_dwarf_stmm2, -1U, gdb_stmm2, NULL, NULL },
    { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), gcc_dwarf_stmm3, gcc_dwarf_stmm3, -1U, gdb_stmm3, NULL, NULL },
    { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), gcc_dwarf_stmm4, gcc_dwarf_stmm4, -1U, gdb_stmm4, NULL, NULL },
    { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), gcc_dwarf_stmm5, gcc_dwarf_stmm5, -1U, gdb_stmm5, NULL, NULL },
    { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), gcc_dwarf_stmm6, gcc_dwarf_stmm6, -1U, gdb_stmm6, NULL, NULL },
    { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), gcc_dwarf_stmm7, gcc_dwarf_stmm7, -1U, gdb_stmm7, NULL, NULL },

    { e_regSetFPU, fpu_xmm0 , "xmm0" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0) , AVX_OFFSET(xmm0) , gcc_dwarf_xmm0 , gcc_dwarf_xmm0 , -1U, gdb_xmm0 , NULL, NULL },
    { e_regSetFPU, fpu_xmm1 , "xmm1" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1) , AVX_OFFSET(xmm1) , gcc_dwarf_xmm1 , gcc_dwarf_xmm1 , -1U, gdb_xmm1 , NULL, NULL },
    { e_regSetFPU, fpu_xmm2 , "xmm2" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2) , AVX_OFFSET(xmm2) , gcc_dwarf_xmm2 , gcc_dwarf_xmm2 , -1U, gdb_xmm2 , NULL, NULL },
    { e_regSetFPU, fpu_xmm3 , "xmm3" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3) , AVX_OFFSET(xmm3) , gcc_dwarf_xmm3 , gcc_dwarf_xmm3 , -1U, gdb_xmm3 , NULL, NULL },
    { e_regSetFPU, fpu_xmm4 , "xmm4" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4) , AVX_OFFSET(xmm4) , gcc_dwarf_xmm4 , gcc_dwarf_xmm4 , -1U, gdb_xmm4 , NULL, NULL },
    { e_regSetFPU, fpu_xmm5 , "xmm5" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5) , AVX_OFFSET(xmm5) , gcc_dwarf_xmm5 , gcc_dwarf_xmm5 , -1U, gdb_xmm5 , NULL, NULL },
    { e_regSetFPU, fpu_xmm6 , "xmm6" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6) , AVX_OFFSET(xmm6) , gcc_dwarf_xmm6 , gcc_dwarf_xmm6 , -1U, gdb_xmm6 , NULL, NULL },
    { e_regSetFPU, fpu_xmm7 , "xmm7" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7) , AVX_OFFSET(xmm7) , gcc_dwarf_xmm7 , gcc_dwarf_xmm7 , -1U, gdb_xmm7 , NULL, NULL },
    { e_regSetFPU, fpu_xmm8 , "xmm8" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8) , AVX_OFFSET(xmm8) , gcc_dwarf_xmm8 , gcc_dwarf_xmm8 , -1U, gdb_xmm8 , NULL, NULL },
    { e_regSetFPU, fpu_xmm9 , "xmm9" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9) , AVX_OFFSET(xmm9) , gcc_dwarf_xmm9 , gcc_dwarf_xmm9 , -1U, gdb_xmm9 , NULL, NULL },
    { e_regSetFPU, fpu_xmm10, "xmm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10) , AVX_OFFSET(xmm10), gcc_dwarf_xmm10, gcc_dwarf_xmm10, -1U, gdb_xmm10, NULL, NULL },
    { e_regSetFPU, fpu_xmm11, "xmm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11) , AVX_OFFSET(xmm11), gcc_dwarf_xmm11, gcc_dwarf_xmm11, -1U, gdb_xmm11, NULL, NULL },
    { e_regSetFPU, fpu_xmm12, "xmm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12) , AVX_OFFSET(xmm12), gcc_dwarf_xmm12, gcc_dwarf_xmm12, -1U, gdb_xmm12, NULL, NULL },
    { e_regSetFPU, fpu_xmm13, "xmm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13) , AVX_OFFSET(xmm13), gcc_dwarf_xmm13, gcc_dwarf_xmm13, -1U, gdb_xmm13, NULL, NULL },
    { e_regSetFPU, fpu_xmm14, "xmm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14) , AVX_OFFSET(xmm14), gcc_dwarf_xmm14, gcc_dwarf_xmm14, -1U, gdb_xmm14, NULL, NULL },
    { e_regSetFPU, fpu_xmm15, "xmm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15) , AVX_OFFSET(xmm15), gcc_dwarf_xmm15, gcc_dwarf_xmm15, -1U, gdb_xmm15, NULL, NULL },

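    // The 32-byte ymm registers follow. Their offsets come from AVX_OFFSET_YMM(n)
    // (defined earlier in this file); the accessors below (GetRegisterValue and
    // SetRegisterValue) treat each ymm value as the 16-byte xmm low half followed
    // by the 16-byte __fpu_ymmhN high half of the AVX thread state.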
    { e_regSetFPU, fpu_ymm0 , "ymm0" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0) , AVX_OFFSET_YMM(0) , gcc_dwarf_ymm0 , gcc_dwarf_ymm0 , -1U, gdb_ymm0 , NULL, NULL },
    { e_regSetFPU, fpu_ymm1 , "ymm1" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1) , AVX_OFFSET_YMM(1) , gcc_dwarf_ymm1 , gcc_dwarf_ymm1 , -1U, gdb_ymm1 , NULL, NULL },
    { e_regSetFPU, fpu_ymm2 , "ymm2" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2) , AVX_OFFSET_YMM(2) , gcc_dwarf_ymm2 , gcc_dwarf_ymm2 , -1U, gdb_ymm2 , NULL, NULL },
    { e_regSetFPU, fpu_ymm3 , "ymm3" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3) , AVX_OFFSET_YMM(3) , gcc_dwarf_ymm3 , gcc_dwarf_ymm3 , -1U, gdb_ymm3 , NULL, NULL },
    { e_regSetFPU, fpu_ymm4 , "ymm4" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4) , AVX_OFFSET_YMM(4) , gcc_dwarf_ymm4 , gcc_dwarf_ymm4 , -1U, gdb_ymm4 , NULL, NULL },
    { e_regSetFPU, fpu_ymm5 , "ymm5" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5) , AVX_OFFSET_YMM(5) , gcc_dwarf_ymm5 , gcc_dwarf_ymm5 , -1U, gdb_ymm5 , NULL, NULL },
    { e_regSetFPU, fpu_ymm6 , "ymm6" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6) , AVX_OFFSET_YMM(6) , gcc_dwarf_ymm6 , gcc_dwarf_ymm6 , -1U, gdb_ymm6 , NULL, NULL },
    { e_regSetFPU, fpu_ymm7 , "ymm7" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7) , AVX_OFFSET_YMM(7) , gcc_dwarf_ymm7 , gcc_dwarf_ymm7 , -1U, gdb_ymm7 , NULL, NULL },
    { e_regSetFPU, fpu_ymm8 , "ymm8" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8) , AVX_OFFSET_YMM(8) , gcc_dwarf_ymm8 , gcc_dwarf_ymm8 , -1U, gdb_ymm8 , NULL, NULL },
    { e_regSetFPU, fpu_ymm9 , "ymm9" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9) , AVX_OFFSET_YMM(9) , gcc_dwarf_ymm9 , gcc_dwarf_ymm9 , -1U, gdb_ymm9 , NULL, NULL },
    { e_regSetFPU, fpu_ymm10, "ymm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10) , AVX_OFFSET_YMM(10), gcc_dwarf_ymm10, gcc_dwarf_ymm10, -1U, gdb_ymm10, NULL, NULL },
    { e_regSetFPU, fpu_ymm11, "ymm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11) , AVX_OFFSET_YMM(11), gcc_dwarf_ymm11, gcc_dwarf_ymm11, -1U, gdb_ymm11, NULL, NULL },
    { e_regSetFPU, fpu_ymm12, "ymm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12) , AVX_OFFSET_YMM(12), gcc_dwarf_ymm12, gcc_dwarf_ymm12, -1U, gdb_ymm12, NULL, NULL },
    { e_regSetFPU, fpu_ymm13, "ymm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13) , AVX_OFFSET_YMM(13), gcc_dwarf_ymm13, gcc_dwarf_ymm13, -1U, gdb_ymm13, NULL, NULL },
    { e_regSetFPU, fpu_ymm14, "ymm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14) , AVX_OFFSET_YMM(14), gcc_dwarf_ymm14, gcc_dwarf_ymm14, -1U, gdb_ymm14, NULL, NULL },
    { e_regSetFPU, fpu_ymm15, "ymm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15) , AVX_OFFSET_YMM(15), gcc_dwarf_ymm15, gcc_dwarf_ymm15, -1U, gdb_ymm15, NULL, NULL }
};

// Exception registers

const DNBRegisterInfo
DNBArchImplX86_64::g_exc_registers[] =
{
    { e_regSetEXC, exc_trapno, "trapno" , NULL, Uint, Hex, EXC_SIZE (trapno) , EXC_OFFSET (trapno) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetEXC, exc_err, "err" , NULL, Uint, Hex, EXC_SIZE (err) , EXC_OFFSET (err) , -1U, -1U, -1U, -1U, NULL, NULL },
    { e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr) , -1U, -1U, -1U, -1U, NULL, NULL }
};

// Number of registers in each register set
const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
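// Note: the counts above use the sizeof(array)/sizeof(element) idiom, so adding or
// removing a row in one of the register tables automatically updates its count.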

//----------------------------------------------------------------------
// Register set definitions. The first definition, at register set index
// zero, is for all registers, followed by the other register sets. The
// register information for the all-registers set need not be filled in.
//----------------------------------------------------------------------
const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_no_avx[] =
{
    { "x86_64 Registers", NULL, k_num_all_registers_no_avx },
    { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
    { "Floating Point Registers", g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
    { "Exception State Registers", g_exc_registers, k_num_exc_registers }
};

const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_avx[] =
{
    { "x86_64 Registers", NULL, k_num_all_registers_avx },
    { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
    { "Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx },
    { "Exception State Registers", g_exc_registers, k_num_exc_registers }
};

// Total number of register sets for this architecture
const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);
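// Both register set tables contain the same number of sets (all, GPR, FPU, EXC),
// so sizing k_num_register_sets from the AVX variant is valid for either table;
// the FPU table that is actually reported is chosen at runtime by
// GetRegisterSetInfo() below.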


DNBArchProtocol *
DNBArchImplX86_64::Create (MachThread *thread)
{
    DNBArchImplX86_64 *obj = new DNBArchImplX86_64 (thread);
    return obj;
}

const uint8_t * const
DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
{
    // 0xCC is the one-byte x86 INT3 (breakpoint trap) instruction.
    static const uint8_t g_breakpoint_opcode[] = { 0xCC };
    if (byte_size == 1)
        return g_breakpoint_opcode;
    return NULL;
}

const DNBRegisterSetInfo *
DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
{
    *num_reg_sets = k_num_register_sets;

    if (CPUHasAVX() || FORCE_AVX_REGS)
        return g_reg_sets_avx;
    else
        return g_reg_sets_no_avx;
}

void
DNBArchImplX86_64::Initialize()
{
    DNBArchPluginInfo arch_plugin_info =
    {
        CPU_TYPE_X86_64,
        DNBArchImplX86_64::Create,
        DNBArchImplX86_64::GetRegisterSetInfo,
        DNBArchImplX86_64::SoftwareBreakpointOpcode
    };

    // Register this arch plug-in with the main protocol class
    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
}

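// Read a single register value. Generic register numbers (PC, SP, FP, FLAGS) are
// first remapped to their x86_64 GPR equivalents, then the cached thread state for
// the owning register set is refreshed before the value is copied out.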
bool
DNBArchImplX86_64::GetRegisterValue(int set, int reg, DNBRegisterValue *value)
{
    if (set == REGISTER_SET_GENERIC)
    {
        switch (reg)
        {
        case GENERIC_REGNUM_PC:     // Program Counter
            set = e_regSetGPR;
            reg = gpr_rip;
            break;

        case GENERIC_REGNUM_SP:     // Stack Pointer
            set = e_regSetGPR;
            reg = gpr_rsp;
            break;

        case GENERIC_REGNUM_FP:     // Frame Pointer
            set = e_regSetGPR;
            reg = gpr_rbp;
            break;

        case GENERIC_REGNUM_FLAGS:  // Processor flags register
            set = e_regSetGPR;
            reg = gpr_rflags;
            break;

        case GENERIC_REGNUM_RA:     // Return Address
        default:
            return false;
        }
    }

    if (GetRegisterState(set, false) != KERN_SUCCESS)
        return false;

    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
    if (regInfo)
    {
        value->info = *regInfo;
        switch (set)
        {
        case e_regSetGPR:
            if (reg < k_num_gpr_registers)
            {
                value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
                return true;
            }
            break;

        case e_regSetFPU:
            if (CPUHasAVX() || FORCE_AVX_REGS)
            {
                switch (reg)
                {
                case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)); return true;
                case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)); return true;
                case fpu_ftw:       value->value.uint8  = m_state.context.fpu.avx.__fpu_ftw; return true;
                case fpu_fop:       value->value.uint16 = m_state.context.fpu.avx.__fpu_fop; return true;
                case fpu_ip:        value->value.uint32 = m_state.context.fpu.avx.__fpu_ip; return true;
                case fpu_cs:        value->value.uint16 = m_state.context.fpu.avx.__fpu_cs; return true;
                case fpu_dp:        value->value.uint32 = m_state.context.fpu.avx.__fpu_dp; return true;
                case fpu_ds:        value->value.uint16 = m_state.context.fpu.avx.__fpu_ds; return true;
                case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr; return true;
                case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask; return true;

                case fpu_stmm0:
                case fpu_stmm1:
                case fpu_stmm2:
                case fpu_stmm3:
                case fpu_stmm4:
                case fpu_stmm5:
                case fpu_stmm6:
                case fpu_stmm7:
                    memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
                    return true;

                case fpu_xmm0:
                case fpu_xmm1:
                case fpu_xmm2:
                case fpu_xmm3:
                case fpu_xmm4:
                case fpu_xmm5:
                case fpu_xmm6:
                case fpu_xmm7:
                case fpu_xmm8:
                case fpu_xmm9:
                case fpu_xmm10:
                case fpu_xmm11:
                case fpu_xmm12:
                case fpu_xmm13:
                case fpu_xmm14:
                case fpu_xmm15:
                    memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
                    return true;

                case fpu_ymm0:
                case fpu_ymm1:
                case fpu_ymm2:
                case fpu_ymm3:
                case fpu_ymm4:
                case fpu_ymm5:
                case fpu_ymm6:
                case fpu_ymm7:
                case fpu_ymm8:
                case fpu_ymm9:
                case fpu_ymm10:
                case fpu_ymm11:
                case fpu_ymm12:
                case fpu_ymm13:
                case fpu_ymm14:
                case fpu_ymm15:
                    // A ymm value is returned as the 16-byte xmm low half followed
                    // by the 16-byte ymmh high half of the AVX thread state.
                    memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
                    memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
                    return true;
                }
            }
            else
            {
                switch (reg)
                {
                case fpu_fcw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); return true;
                case fpu_fsw:       value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); return true;
                case fpu_ftw:       value->value.uint8  = m_state.context.fpu.no_avx.__fpu_ftw; return true;
                case fpu_fop:       value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; return true;
                case fpu_ip:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; return true;
                case fpu_cs:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; return true;
                case fpu_dp:        value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; return true;
                case fpu_ds:        value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; return true;
                case fpu_mxcsr:     value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; return true;
                case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; return true;

                case fpu_stmm0:
                case fpu_stmm1:
                case fpu_stmm2:
                case fpu_stmm3:
                case fpu_stmm4:
                case fpu_stmm5:
                case fpu_stmm6:
                case fpu_stmm7:
                    memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
                    return true;

                case fpu_xmm0:
                case fpu_xmm1:
                case fpu_xmm2:
                case fpu_xmm3:
                case fpu_xmm4:
                case fpu_xmm5:
                case fpu_xmm6:
                case fpu_xmm7:
                case fpu_xmm8:
                case fpu_xmm9:
                case fpu_xmm10:
                case fpu_xmm11:
                case fpu_xmm12:
                case fpu_xmm13:
                case fpu_xmm14:
                case fpu_xmm15:
                    memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
                    return true;
                }
            }
            break;

        case e_regSetEXC:
            switch (reg)
            {
            case exc_trapno:    value->value.uint32 = m_state.context.exc.__trapno; return true;
            case exc_err:       value->value.uint32 = m_state.context.exc.__err; return true;
            case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
            }
            break;
        }
    }
    return false;
}


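// Write a single register value. The update is applied to the cached thread state
// and, if it succeeds, pushed back to the thread via SetRegisterState() at the end.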
bool
DNBArchImplX86_64::SetRegisterValue(int set, int reg, const DNBRegisterValue *value)
{
    if (set == REGISTER_SET_GENERIC)
    {
        switch (reg)
        {
        case GENERIC_REGNUM_PC:     // Program Counter
            set = e_regSetGPR;
            reg = gpr_rip;
            break;

        case GENERIC_REGNUM_SP:     // Stack Pointer
            set = e_regSetGPR;
            reg = gpr_rsp;
            break;

        case GENERIC_REGNUM_FP:     // Frame Pointer
            set = e_regSetGPR;
            reg = gpr_rbp;
            break;

        case GENERIC_REGNUM_FLAGS:  // Processor flags register
            set = e_regSetGPR;
            reg = gpr_rflags;
            break;

        case GENERIC_REGNUM_RA:     // Return Address
        default:
            return false;
        }
    }

    if (GetRegisterState(set, false) != KERN_SUCCESS)
        return false;

    bool success = false;
    const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
    if (regInfo)
    {
        switch (set)
        {
        case e_regSetGPR:
            if (reg < k_num_gpr_registers)
            {
                ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
                success = true;
            }
            break;

        case e_regSetFPU:
            if (CPUHasAVX() || FORCE_AVX_REGS)
            {
                switch (reg)
                {
                case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16; success = true; break;
                case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16; success = true; break;
                case fpu_ftw:       m_state.context.fpu.avx.__fpu_ftw = value->value.uint8; success = true; break;
                case fpu_fop:       m_state.context.fpu.avx.__fpu_fop = value->value.uint16; success = true; break;
                case fpu_ip:        m_state.context.fpu.avx.__fpu_ip = value->value.uint32; success = true; break;
                case fpu_cs:        m_state.context.fpu.avx.__fpu_cs = value->value.uint16; success = true; break;
                case fpu_dp:        m_state.context.fpu.avx.__fpu_dp = value->value.uint32; success = true; break;
                case fpu_ds:        m_state.context.fpu.avx.__fpu_ds = value->value.uint16; success = true; break;
                case fpu_mxcsr:     m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32; success = true; break;
                case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32; success = true; break;

                case fpu_stmm0:
                case fpu_stmm1:
                case fpu_stmm2:
                case fpu_stmm3:
                case fpu_stmm4:
                case fpu_stmm5:
                case fpu_stmm6:
                case fpu_stmm7:
                    memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
                    success = true;
                    break;

                case fpu_xmm0:
                case fpu_xmm1:
                case fpu_xmm2:
                case fpu_xmm3:
                case fpu_xmm4:
                case fpu_xmm5:
                case fpu_xmm6:
                case fpu_xmm7:
                case fpu_xmm8:
                case fpu_xmm9:
                case fpu_xmm10:
                case fpu_xmm11:
                case fpu_xmm12:
                case fpu_xmm13:
                case fpu_xmm14:
                case fpu_xmm15:
                    memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
                    success = true;
                    break;

                case fpu_ymm0:
                case fpu_ymm1:
                case fpu_ymm2:
                case fpu_ymm3:
                case fpu_ymm4:
                case fpu_ymm5:
                case fpu_ymm6:
                case fpu_ymm7:
                case fpu_ymm8:
                case fpu_ymm9:
                case fpu_ymm10:
                case fpu_ymm11:
                case fpu_ymm12:
                case fpu_ymm13:
                case fpu_ymm14:
                case fpu_ymm15:
                    memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
                    memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
                    // Mark success (rather than returning) so the updated AVX state
                    // is written back to the thread by SetRegisterState() below.
                    success = true;
                    break;
                }
            }
            else
            {
                switch (reg)
                {
                case fpu_fcw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16; success = true; break;
                case fpu_fsw:       *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16; success = true; break;
                case fpu_ftw:       m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8; success = true; break;
                case fpu_fop:       m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; success = true; break;
                case fpu_ip:        m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; success = true; break;
                case fpu_cs:        m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; success = true; break;
                case fpu_dp:        m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; success = true; break;
                case fpu_ds:        m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; success = true; break;
                case fpu_mxcsr:     m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; success = true; break;
                case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; success = true; break;

                case fpu_stmm0:
                case fpu_stmm1:
                case fpu_stmm2:
                case fpu_stmm3:
                case fpu_stmm4:
                case fpu_stmm5:
                case fpu_stmm6:
                case fpu_stmm7:
                    memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
                    success = true;
                    break;

                case fpu_xmm0:
                case fpu_xmm1:
                case fpu_xmm2:
                case fpu_xmm3:
                case fpu_xmm4:
                case fpu_xmm5:
                case fpu_xmm6:
                case fpu_xmm7:
                case fpu_xmm8:
                case fpu_xmm9:
                case fpu_xmm10:
                case fpu_xmm11:
                case fpu_xmm12:
                case fpu_xmm13:
                case fpu_xmm14:
                case fpu_xmm15:
                    memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
                    success = true;
                    break;
                }
            }
            break;

        case e_regSetEXC:
            switch (reg)
            {
            case exc_trapno:    m_state.context.exc.__trapno = value->value.uint32; success = true; break;
            case exc_err:       m_state.context.exc.__err = value->value.uint32; success = true; break;
            case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break;
            }
            break;
        }
    }

    if (success)
        return SetRegisterState(set) == KERN_SUCCESS;
    return false;
}


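// Copy the full register context (GPR + FPU/AVX + EXC) into the caller's buffer.
// As the code below notes, the full context size is returned even when buf is NULL,
// so a (hypothetical) caller could size its buffer first, e.g.:
//   nub_size_t needed = arch->GetRegisterContext (NULL, 0);
//   std::vector<uint8_t> regs (needed);
//   arch->GetRegisterContext (regs.data(), regs.size());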
nub_size_t
DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len)
{
    nub_size_t size = sizeof (m_state.context);

    if (buf && buf_len)
    {
        if (size > buf_len)
            size = buf_len;

        bool force = false;
        kern_return_t kret;
        if ((kret = GetGPRState(force)) != KERN_SUCCESS)
        {
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) error: GPR regs failed to read: %u", buf, (uint64_t)buf_len, kret);
            size = 0;
        }
        else if ((kret = GetFPUState(force)) != KERN_SUCCESS)
        {
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) error: %s regs failed to read: %u", buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
            size = 0;
        }
        else if ((kret = GetEXCState(force)) != KERN_SUCCESS)
        {
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) error: EXC regs failed to read: %u", buf, (uint64_t)buf_len, kret);
            size = 0;
        }
        else
        {
            // Success
            ::memcpy (buf, &m_state.context, size);
        }
    }
    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size);
    // Return the size of the register context even if NULL was passed in
    return size;
}

nub_size_t
DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len)
{
    nub_size_t size = sizeof (m_state.context);
    if (buf == NULL || buf_len == 0)
        size = 0;

    if (size)
    {
        if (size > buf_len)
            size = buf_len;

        ::memcpy (&m_state.context, buf, size);
        kern_return_t kret;
        if ((kret = SetGPRState()) != KERN_SUCCESS)
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) error: GPR regs failed to write: %u", buf, (uint64_t)buf_len, kret);
        if ((kret = SetFPUState()) != KERN_SUCCESS)
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) error: %s regs failed to write: %u", buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
        if ((kret = SetEXCState()) != KERN_SUCCESS)
            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) error: EXC regs failed to write: %u", buf, (uint64_t)buf_len, kret);
    }
    DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size);
    return size;
}


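// For e_regSetALL the three kern_return_t results are combined with bitwise OR:
// the combined value is KERN_SUCCESS (0) only if every fetch succeeded, although a
// specific failure code may not survive the OR intact. The same idiom is used in
// SetRegisterState() below.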
kern_return_t
DNBArchImplX86_64::GetRegisterState(int set, bool force)
{
    switch (set)
    {
    case e_regSetALL:   return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
    case e_regSetGPR:   return GetGPRState(force);
    case e_regSetFPU:   return GetFPUState(force);
    case e_regSetEXC:   return GetEXCState(force);
    default: break;
    }
    return KERN_INVALID_ARGUMENT;
}

kern_return_t
DNBArchImplX86_64::SetRegisterState(int set)
{
    // Make sure we have a valid context to set.
    if (RegisterSetStateIsValid(set))
    {
        switch (set)
        {
        case e_regSetALL:   return SetGPRState() | SetFPUState() | SetEXCState();
        case e_regSetGPR:   return SetGPRState();
        case e_regSetFPU:   return SetFPUState();
        case e_regSetEXC:   return SetEXCState();
        default: break;
        }
    }
    return KERN_INVALID_ARGUMENT;
}

bool
DNBArchImplX86_64::RegisterSetStateIsValid (int set) const
{
    return m_state.RegsAreValid(set);
}



#endif // #if defined (__i386__) || defined (__x86_64__)
