1 //===-- DNBArchImplARM64.cpp ------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Created by Greg Clayton on 6/25/07.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
14
15 #include "MacOSX/arm64/DNBArchImplARM64.h"
16
17 #if defined(ARM_THREAD_STATE64_COUNT)
18
19 #include "DNB.h"
20 #include "DNBBreakpoint.h"
21 #include "DNBLog.h"
22 #include "DNBRegisterInfo.h"
23 #include "MacOSX/MachProcess.h"
24 #include "MacOSX/MachThread.h"
25
26 #include <inttypes.h>
27 #include <sys/sysctl.h>
28
29 #if __has_feature(ptrauth_calls)
30 #include <ptrauth.h>
31 #endif
32
// Stop watchpoints only in user mode (EL0)
// (PAC bits in the DBGWCRn_EL1 watchpoint control register)
#define S_USER ((uint32_t)(2u << 1))
36
37 #define BCR_ENABLE ((uint32_t)(1u))
38 #define WCR_ENABLE ((uint32_t)(1u))
39
// Watchpoint load/store
// (LSC bits in the DBGWCRn_EL1 watchpoint control register)
42 #define WCR_LOAD ((uint32_t)(1u << 3))
43 #define WCR_STORE ((uint32_t)(1u << 4))
44
45 // Enable breakpoint, watchpoint, and vector catch debug exceptions.
46 // (MDE bit in the MDSCR_EL1 register. Equivalent to the MDBGen bit in
// DBGDSCRext in AArch32)
48 #define MDE_ENABLE ((uint32_t)(1u << 15))
49
50 // Single instruction step
51 // (SS bit in the MDSCR_EL1 register)
52 #define SS_ENABLE ((uint32_t)(1u))
53
static const uint8_t g_arm64_breakpoint_opcode[] = {
    0x00, 0x00, 0x20, 0xD4}; // "brk #0", 0xd4200000 in little-endian byte order
56
57 // If we need to set one logical watchpoint by using
58 // two hardware watchpoint registers, the watchpoint
59 // will be split into a "high" and "low" watchpoint.
60 // Record both of them in the LoHi array.
61
62 // It's safe to initialize to all 0's since
63 // hi > lo and therefore LoHi[i] cannot be 0.
64 static uint32_t LoHi[16] = {0};
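// For illustration (addresses hypothetical): a 4-byte watchpoint at 0x100e
// spans two doublewords, so it is programmed as a "lo" watchpoint at 0x1008
// covering bytes 6-7 and a "hi" watchpoint at 0x1010 covering bytes 0-1,
// and LoHi[lo] = hi records the pairing so both registers can be disabled,
// re-enabled, and reported together.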
65
void DNBArchMachARM64::Initialize() {
67 DNBArchPluginInfo arch_plugin_info = {
68 CPU_TYPE_ARM64, DNBArchMachARM64::Create,
69 DNBArchMachARM64::GetRegisterSetInfo,
70 DNBArchMachARM64::SoftwareBreakpointOpcode};
71
72 // Register this arch plug-in with the main protocol class
73 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);
74
75 DNBArchPluginInfo arch_plugin_info_32 = {
76 CPU_TYPE_ARM64_32, DNBArchMachARM64::Create,
77 DNBArchMachARM64::GetRegisterSetInfo,
78 DNBArchMachARM64::SoftwareBreakpointOpcode};
79
80 // Register this arch plug-in with the main protocol class
81 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info_32);
82 }
83
DNBArchProtocol *DNBArchMachARM64::Create(MachThread *thread) {
85 DNBArchMachARM64 *obj = new DNBArchMachARM64(thread);
86
87 return obj;
88 }
89
90 const uint8_t *
DNBArchMachARM64::SoftwareBreakpointOpcode(nub_size_t byte_size) {
92 return g_arm64_breakpoint_opcode;
93 }
94
uint32_t DNBArchMachARM64::GetCPUType() { return CPU_TYPE_ARM64; }
96
uint64_t DNBArchMachARM64::GetPC(uint64_t failValue) {
98 // Get program counter
99 if (GetGPRState(false) == KERN_SUCCESS)
100 #if defined(__LP64__)
101 return arm_thread_state64_get_pc(m_state.context.gpr);
102 #else
103 return m_state.context.gpr.__pc;
104 #endif
105 return failValue;
106 }
107
kern_return_t DNBArchMachARM64::SetPC(uint64_t value) {
109 // Get program counter
110 kern_return_t err = GetGPRState(false);
111 if (err == KERN_SUCCESS) {
112 #if defined(__LP64__)
113 #if __has_feature(ptrauth_calls)
114 // The incoming value could be garbage. Strip it to avoid
115 // trapping when it gets resigned in the thread state.
116 value = (uint64_t) ptrauth_strip((void*) value, ptrauth_key_function_pointer);
117 value = (uint64_t) ptrauth_sign_unauthenticated((void*) value, ptrauth_key_function_pointer, 0);
118 #endif
119 arm_thread_state64_set_pc_fptr (m_state.context.gpr, (void*) value);
120 #else
121 m_state.context.gpr.__pc = value;
122 #endif
123 err = SetGPRState();
124 }
125 return err == KERN_SUCCESS;
126 }
127
uint64_t DNBArchMachARM64::GetSP(uint64_t failValue) {
129 // Get stack pointer
130 if (GetGPRState(false) == KERN_SUCCESS)
131 #if defined(__LP64__)
132 return arm_thread_state64_get_sp(m_state.context.gpr);
133 #else
134 return m_state.context.gpr.__sp;
135 #endif
136 return failValue;
137 }
138
kern_return_t DNBArchMachARM64::GetGPRState(bool force) {
140 int set = e_regSetGPR;
141 // Check if we have valid cached registers
142 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
143 return KERN_SUCCESS;
144
145 // Read the registers from our thread
146 mach_msg_type_number_t count = e_regSetGPRCount;
147 kern_return_t kret =
148 ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64,
149 (thread_state_t)&m_state.context.gpr, &count);
150 if (DNBLogEnabledForAny(LOG_THREAD)) {
151 uint64_t *x = &m_state.context.gpr.__x[0];
152 DNBLogThreaded(
153 "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs"
154 "\n x0=%16.16llx"
155 "\n x1=%16.16llx"
156 "\n x2=%16.16llx"
157 "\n x3=%16.16llx"
158 "\n x4=%16.16llx"
159 "\n x5=%16.16llx"
160 "\n x6=%16.16llx"
161 "\n x7=%16.16llx"
162 "\n x8=%16.16llx"
163 "\n x9=%16.16llx"
164 "\n x10=%16.16llx"
165 "\n x11=%16.16llx"
166 "\n x12=%16.16llx"
167 "\n x13=%16.16llx"
168 "\n x14=%16.16llx"
169 "\n x15=%16.16llx"
170 "\n x16=%16.16llx"
171 "\n x17=%16.16llx"
172 "\n x18=%16.16llx"
173 "\n x19=%16.16llx"
174 "\n x20=%16.16llx"
175 "\n x21=%16.16llx"
176 "\n x22=%16.16llx"
177 "\n x23=%16.16llx"
178 "\n x24=%16.16llx"
179 "\n x25=%16.16llx"
180 "\n x26=%16.16llx"
181 "\n x27=%16.16llx"
182 "\n x28=%16.16llx"
183 "\n fp=%16.16llx"
184 "\n lr=%16.16llx"
185 "\n sp=%16.16llx"
186 "\n pc=%16.16llx"
187 "\n cpsr=%8.8x",
188 m_thread->MachPortNumber(), e_regSetGPR, e_regSetGPRCount, kret, count,
        x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11],
190 x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20], x[21],
191 x[22], x[23], x[24], x[25], x[26], x[27], x[28],
192 #if defined(__LP64__)
193 (uint64_t) arm_thread_state64_get_fp (m_state.context.gpr),
194 (uint64_t) arm_thread_state64_get_lr (m_state.context.gpr),
195 (uint64_t) arm_thread_state64_get_sp (m_state.context.gpr),
196 (uint64_t) arm_thread_state64_get_pc (m_state.context.gpr),
197 #else
198 m_state.context.gpr.__fp, m_state.context.gpr.__lr,
199 m_state.context.gpr.__sp, m_state.context.gpr.__pc,
200 #endif
201 m_state.context.gpr.__cpsr);
202 }
203 m_state.SetError(set, Read, kret);
204 return kret;
205 }
206
kern_return_t DNBArchMachARM64::GetVFPState(bool force) {
208 int set = e_regSetVFP;
209 // Check if we have valid cached registers
210 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
211 return KERN_SUCCESS;
212
213 // Read the registers from our thread
214 mach_msg_type_number_t count = e_regSetVFPCount;
215 kern_return_t kret =
216 ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64,
217 (thread_state_t)&m_state.context.vfp, &count);
218 if (DNBLogEnabledForAny(LOG_THREAD)) {
219 #if defined(__arm64__) || defined(__aarch64__)
220 DNBLogThreaded(
221 "thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
222 "\n q0 = 0x%16.16llx%16.16llx"
223 "\n q1 = 0x%16.16llx%16.16llx"
224 "\n q2 = 0x%16.16llx%16.16llx"
225 "\n q3 = 0x%16.16llx%16.16llx"
226 "\n q4 = 0x%16.16llx%16.16llx"
227 "\n q5 = 0x%16.16llx%16.16llx"
228 "\n q6 = 0x%16.16llx%16.16llx"
229 "\n q7 = 0x%16.16llx%16.16llx"
230 "\n q8 = 0x%16.16llx%16.16llx"
231 "\n q9 = 0x%16.16llx%16.16llx"
232 "\n q10 = 0x%16.16llx%16.16llx"
233 "\n q11 = 0x%16.16llx%16.16llx"
234 "\n q12 = 0x%16.16llx%16.16llx"
235 "\n q13 = 0x%16.16llx%16.16llx"
236 "\n q14 = 0x%16.16llx%16.16llx"
237 "\n q15 = 0x%16.16llx%16.16llx"
238 "\n q16 = 0x%16.16llx%16.16llx"
239 "\n q17 = 0x%16.16llx%16.16llx"
240 "\n q18 = 0x%16.16llx%16.16llx"
241 "\n q19 = 0x%16.16llx%16.16llx"
242 "\n q20 = 0x%16.16llx%16.16llx"
243 "\n q21 = 0x%16.16llx%16.16llx"
244 "\n q22 = 0x%16.16llx%16.16llx"
245 "\n q23 = 0x%16.16llx%16.16llx"
246 "\n q24 = 0x%16.16llx%16.16llx"
247 "\n q25 = 0x%16.16llx%16.16llx"
248 "\n q26 = 0x%16.16llx%16.16llx"
249 "\n q27 = 0x%16.16llx%16.16llx"
250 "\n q28 = 0x%16.16llx%16.16llx"
251 "\n q29 = 0x%16.16llx%16.16llx"
252 "\n q30 = 0x%16.16llx%16.16llx"
253 "\n q31 = 0x%16.16llx%16.16llx"
254 "\n fpsr = 0x%8.8x"
255 "\n fpcr = 0x%8.8x\n\n",
256 m_thread->MachPortNumber(), e_regSetVFP, e_regSetVFPCount, kret, count,
257 ((uint64_t *)&m_state.context.vfp.__v[0])[0],
258 ((uint64_t *)&m_state.context.vfp.__v[0])[1],
259 ((uint64_t *)&m_state.context.vfp.__v[1])[0],
260 ((uint64_t *)&m_state.context.vfp.__v[1])[1],
261 ((uint64_t *)&m_state.context.vfp.__v[2])[0],
262 ((uint64_t *)&m_state.context.vfp.__v[2])[1],
263 ((uint64_t *)&m_state.context.vfp.__v[3])[0],
264 ((uint64_t *)&m_state.context.vfp.__v[3])[1],
265 ((uint64_t *)&m_state.context.vfp.__v[4])[0],
266 ((uint64_t *)&m_state.context.vfp.__v[4])[1],
267 ((uint64_t *)&m_state.context.vfp.__v[5])[0],
268 ((uint64_t *)&m_state.context.vfp.__v[5])[1],
269 ((uint64_t *)&m_state.context.vfp.__v[6])[0],
270 ((uint64_t *)&m_state.context.vfp.__v[6])[1],
271 ((uint64_t *)&m_state.context.vfp.__v[7])[0],
272 ((uint64_t *)&m_state.context.vfp.__v[7])[1],
273 ((uint64_t *)&m_state.context.vfp.__v[8])[0],
274 ((uint64_t *)&m_state.context.vfp.__v[8])[1],
275 ((uint64_t *)&m_state.context.vfp.__v[9])[0],
276 ((uint64_t *)&m_state.context.vfp.__v[9])[1],
277 ((uint64_t *)&m_state.context.vfp.__v[10])[0],
278 ((uint64_t *)&m_state.context.vfp.__v[10])[1],
279 ((uint64_t *)&m_state.context.vfp.__v[11])[0],
280 ((uint64_t *)&m_state.context.vfp.__v[11])[1],
281 ((uint64_t *)&m_state.context.vfp.__v[12])[0],
282 ((uint64_t *)&m_state.context.vfp.__v[12])[1],
283 ((uint64_t *)&m_state.context.vfp.__v[13])[0],
284 ((uint64_t *)&m_state.context.vfp.__v[13])[1],
285 ((uint64_t *)&m_state.context.vfp.__v[14])[0],
286 ((uint64_t *)&m_state.context.vfp.__v[14])[1],
287 ((uint64_t *)&m_state.context.vfp.__v[15])[0],
288 ((uint64_t *)&m_state.context.vfp.__v[15])[1],
289 ((uint64_t *)&m_state.context.vfp.__v[16])[0],
290 ((uint64_t *)&m_state.context.vfp.__v[16])[1],
291 ((uint64_t *)&m_state.context.vfp.__v[17])[0],
292 ((uint64_t *)&m_state.context.vfp.__v[17])[1],
293 ((uint64_t *)&m_state.context.vfp.__v[18])[0],
294 ((uint64_t *)&m_state.context.vfp.__v[18])[1],
295 ((uint64_t *)&m_state.context.vfp.__v[19])[0],
296 ((uint64_t *)&m_state.context.vfp.__v[19])[1],
297 ((uint64_t *)&m_state.context.vfp.__v[20])[0],
298 ((uint64_t *)&m_state.context.vfp.__v[20])[1],
299 ((uint64_t *)&m_state.context.vfp.__v[21])[0],
300 ((uint64_t *)&m_state.context.vfp.__v[21])[1],
301 ((uint64_t *)&m_state.context.vfp.__v[22])[0],
302 ((uint64_t *)&m_state.context.vfp.__v[22])[1],
303 ((uint64_t *)&m_state.context.vfp.__v[23])[0],
304 ((uint64_t *)&m_state.context.vfp.__v[23])[1],
305 ((uint64_t *)&m_state.context.vfp.__v[24])[0],
306 ((uint64_t *)&m_state.context.vfp.__v[24])[1],
307 ((uint64_t *)&m_state.context.vfp.__v[25])[0],
308 ((uint64_t *)&m_state.context.vfp.__v[25])[1],
309 ((uint64_t *)&m_state.context.vfp.__v[26])[0],
310 ((uint64_t *)&m_state.context.vfp.__v[26])[1],
311 ((uint64_t *)&m_state.context.vfp.__v[27])[0],
312 ((uint64_t *)&m_state.context.vfp.__v[27])[1],
313 ((uint64_t *)&m_state.context.vfp.__v[28])[0],
314 ((uint64_t *)&m_state.context.vfp.__v[28])[1],
315 ((uint64_t *)&m_state.context.vfp.__v[29])[0],
316 ((uint64_t *)&m_state.context.vfp.__v[29])[1],
317 ((uint64_t *)&m_state.context.vfp.__v[30])[0],
318 ((uint64_t *)&m_state.context.vfp.__v[30])[1],
319 ((uint64_t *)&m_state.context.vfp.__v[31])[0],
320 ((uint64_t *)&m_state.context.vfp.__v[31])[1],
321 m_state.context.vfp.__fpsr, m_state.context.vfp.__fpcr);
322 #endif
323 }
324 m_state.SetError(set, Read, kret);
325 return kret;
326 }
327
kern_return_t DNBArchMachARM64::GetEXCState(bool force) {
329 int set = e_regSetEXC;
330 // Check if we have valid cached registers
331 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
332 return KERN_SUCCESS;
333
334 // Read the registers from our thread
335 mach_msg_type_number_t count = e_regSetEXCCount;
336 kern_return_t kret =
337 ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
338 (thread_state_t)&m_state.context.exc, &count);
339 m_state.SetError(set, Read, kret);
340 return kret;
341 }
342
static void DumpDBGState(const arm_debug_state_t &dbg) {
344 uint32_t i = 0;
345 for (i = 0; i < 16; i++)
346 DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } "
347 "WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
348 i, i, dbg.__bvr[i], dbg.__bcr[i], i, i, dbg.__wvr[i],
349 dbg.__wcr[i]);
350 }
351
kern_return_t DNBArchMachARM64::GetDBGState(bool force) {
353 int set = e_regSetDBG;
354
355 // Check if we have valid cached registers
356 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
357 return KERN_SUCCESS;
358
359 // Read the registers from our thread
360 mach_msg_type_number_t count = e_regSetDBGCount;
361 kern_return_t kret =
362 ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
363 (thread_state_t)&m_state.dbg, &count);
364 m_state.SetError(set, Read, kret);
365
366 return kret;
367 }
368
kern_return_t DNBArchMachARM64::SetGPRState() {
370 int set = e_regSetGPR;
371 kern_return_t kret = ::thread_set_state(
372 m_thread->MachPortNumber(), ARM_THREAD_STATE64,
373 (thread_state_t)&m_state.context.gpr, e_regSetGPRCount);
374 m_state.SetError(set, Write,
375 kret); // Set the current write error for this register set
376 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
377 // state in case registers are read
378 // back differently
379 return kret; // Return the error code
380 }
381
kern_return_t DNBArchMachARM64::SetVFPState() {
383 int set = e_regSetVFP;
384 kern_return_t kret = ::thread_set_state(
385 m_thread->MachPortNumber(), ARM_NEON_STATE64,
386 (thread_state_t)&m_state.context.vfp, e_regSetVFPCount);
387 m_state.SetError(set, Write,
388 kret); // Set the current write error for this register set
389 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
390 // state in case registers are read
391 // back differently
392 return kret; // Return the error code
393 }
394
kern_return_t DNBArchMachARM64::SetEXCState() {
396 int set = e_regSetEXC;
397 kern_return_t kret = ::thread_set_state(
398 m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
399 (thread_state_t)&m_state.context.exc, e_regSetEXCCount);
400 m_state.SetError(set, Write,
401 kret); // Set the current write error for this register set
402 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
403 // state in case registers are read
404 // back differently
405 return kret; // Return the error code
406 }
407
kern_return_t DNBArchMachARM64::SetDBGState(bool also_set_on_task) {
409 int set = e_regSetDBG;
410 kern_return_t kret =
411 ::thread_set_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
412 (thread_state_t)&m_state.dbg, e_regSetDBGCount);
413 if (also_set_on_task) {
414 kern_return_t task_kret = task_set_state(
415 m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64,
416 (thread_state_t)&m_state.dbg, e_regSetDBGCount);
417 if (task_kret != KERN_SUCCESS)
418 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed "
419 "to set debug control register state: "
420 "0x%8.8x.",
421 task_kret);
422 }
423 m_state.SetError(set, Write,
424 kret); // Set the current write error for this register set
425 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
426 // state in case registers are read
427 // back differently
428
429 return kret; // Return the error code
430 }
431
void DNBArchMachARM64::ThreadWillResume() {
433 // Do we need to step this thread? If so, let the mach thread tell us so.
434 if (m_thread->IsStepping()) {
435 EnableHardwareSingleStep(true);
436 }
437
438 // Disable the triggered watchpoint temporarily before we resume.
439 // Plus, we try to enable hardware single step to execute past the instruction
440 // which triggered our watchpoint.
441 if (m_watchpoint_did_occur) {
442 if (m_watchpoint_hw_index >= 0) {
443 kern_return_t kret = GetDBGState(false);
444 if (kret == KERN_SUCCESS &&
445 !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
446 // The watchpoint might have been disabled by the user. We don't need
447 // to do anything at all
448 // to enable hardware single stepping.
449 m_watchpoint_did_occur = false;
450 m_watchpoint_hw_index = -1;
451 return;
452 }
453
454 DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::ThreadWillResume() "
456 "DisableHardwareWatchpoint(%d) called",
457 m_watchpoint_hw_index);
458
459 // Enable hardware single step to move past the watchpoint-triggering
460 // instruction.
461 m_watchpoint_resume_single_step_enabled =
462 (EnableHardwareSingleStep(true) == KERN_SUCCESS);
463
464 // If we are not able to enable single step to move past the
465 // watchpoint-triggering instruction,
466 // at least we should reset the two watchpoint member variables so that
467 // the next time around
468 // this callback function is invoked, the enclosing logical branch is
469 // skipped.
470 if (!m_watchpoint_resume_single_step_enabled) {
471 // Reset the two watchpoint member variables.
472 m_watchpoint_did_occur = false;
473 m_watchpoint_hw_index = -1;
474 DNBLogThreadedIf(
475 LOG_WATCHPOINTS,
476 "DNBArchMachARM::ThreadWillResume() failed to enable single step");
477 } else
478 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() "
479 "succeeded to enable single step");
480 }
481 }
482 }
483
bool DNBArchMachARM64::NotifyException(MachException::Data &exc) {
485
486 switch (exc.exc_type) {
487 default:
488 break;
489 case EXC_BREAKPOINT:
490 if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) {
491 // The data break address is passed as exc_data[1].
492 nub_addr_t addr = exc.exc_data[1];
493 // Find the hardware index with the side effect of possibly massaging the
494 // addr to return the starting address as seen from the debugger side.
495 uint32_t hw_index = GetHardwareWatchpointHit(addr);
496
497 // One logical watchpoint was split into two watchpoint locations because
498 // it was too big. If the watchpoint exception is indicating the 2nd half
499 // of the two-parter, find the address of the 1st half and report that --
500 // that's what lldb is going to expect to see.
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::NotifyException "
502 "watchpoint %d was hit on address "
503 "0x%llx",
504 hw_index, (uint64_t)addr);
505 const int num_watchpoints = NumSupportedHardwareWatchpoints();
506 for (int i = 0; i < num_watchpoints; i++) {
507 if (LoHi[i] != 0 && LoHi[i] == hw_index && LoHi[i] != i &&
508 GetWatchpointAddressByIndex(i) != INVALID_NUB_ADDRESS) {
509 addr = GetWatchpointAddressByIndex(i);
          DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::NotifyException "
511 "It is a linked watchpoint; "
512 "rewritten to index %d addr 0x%llx",
513 LoHi[i], (uint64_t)addr);
514 }
515 }
516
517 if (hw_index != INVALID_NUB_HW_INDEX) {
518 m_watchpoint_did_occur = true;
519 m_watchpoint_hw_index = hw_index;
520 exc.exc_data[1] = addr;
521 // Piggyback the hw_index in the exc.data.
522 exc.exc_data.push_back(hw_index);
523 }
524
525 return true;
526 }
527 // detect a __builtin_debugtrap instruction pattern ("brk #0xf000")
528 // and advance the $pc past it, so that the user can continue execution.
529 // Generally speaking, this knowledge should be centralized in lldb,
530 // recognizing the builtin_trap instruction and knowing how to advance
531 // the pc past it, so that continue etc work.
532 if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_BREAKPOINT) {
533 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
534 if (pc != INVALID_NUB_ADDRESS && pc > 0) {
535 DNBBreakpoint *bp =
536 m_thread->Process()->Breakpoints().FindByAddress(pc);
537 if (bp == nullptr) {
538 uint8_t insnbuf[4];
539 if (m_thread->Process()->ReadMemory(pc, 4, insnbuf) == 4) {
540 uint8_t builtin_debugtrap_insn[4] = {0x00, 0x00, 0x3e,
541 0xd4}; // brk #0xf000
542 if (memcmp(insnbuf, builtin_debugtrap_insn, 4) == 0) {
543 SetPC(pc + 4);
544 }
545 }
546 }
547 }
548 }
549 break;
550 }
551 return false;
552 }
553
bool DNBArchMachARM64::ThreadDidStop() {
555 bool success = true;
556
557 m_state.InvalidateAllRegisterStates();
558
559 if (m_watchpoint_resume_single_step_enabled) {
560 // Great! We now disable the hardware single step as well as re-enable the
561 // hardware watchpoint.
562 // See also ThreadWillResume().
563 if (EnableHardwareSingleStep(false) == KERN_SUCCESS) {
564 if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) {
565 ReenableHardwareWatchpoint(m_watchpoint_hw_index);
566 m_watchpoint_resume_single_step_enabled = false;
567 m_watchpoint_did_occur = false;
568 m_watchpoint_hw_index = -1;
569 } else {
570 DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
571 "is true but (m_watchpoint_did_occur && "
572 "m_watchpoint_hw_index >= 0) does not hold!");
573 }
574 } else {
575 DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
576 "is true but unable to disable single step!");
577 }
578 }
579
580 // Are we stepping a single instruction?
581 if (GetGPRState(true) == KERN_SUCCESS) {
582 // We are single stepping, was this the primary thread?
583 if (m_thread->IsStepping()) {
584 // This was the primary thread, we need to clear the trace
585 // bit if so.
586 success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
587 } else {
588 // The MachThread will automatically restore the suspend count
589 // in ThreadDidStop(), so we don't need to do anything here if
590 // we weren't the primary thread the last time
591 }
592 }
593 return success;
594 }
595
// Set the single step bit in the MDSCR_EL1 debug register.
kern_return_t DNBArchMachARM64::EnableHardwareSingleStep(bool enable) {
598 DNBError err;
599 DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);
600
601 err = GetGPRState(false);
602
603 if (err.Fail()) {
604 err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
605 return err.Status();
606 }
607
608 err = GetDBGState(false);
609
610 if (err.Fail()) {
611 err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
612 return err.Status();
613 }
614
615 #if defined(__LP64__)
616 uint64_t pc = arm_thread_state64_get_pc (m_state.context.gpr);
617 #else
618 uint64_t pc = m_state.context.gpr.__pc;
619 #endif
620
621 if (enable) {
622 DNBLogThreadedIf(LOG_STEP,
623 "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx",
624 __FUNCTION__, pc);
625 m_state.dbg.__mdscr_el1 |= SS_ENABLE;
626 } else {
627 DNBLogThreadedIf(LOG_STEP,
628 "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx",
629 __FUNCTION__, pc);
630 m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
631 }
632
633 return SetDBGState(false);
634 }
635
636 // return 1 if bit "BIT" is set in "value"
static inline uint32_t bit(uint32_t value, uint32_t bit) {
638 return (value >> bit) & 1u;
639 }
640
641 // return the bitfield "value[msbit:lsbit]".
static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit) {
643 assert(msbit >= lsbit);
644 uint64_t shift_left = sizeof(value) * 8 - 1 - msbit;
645 value <<=
646 shift_left; // shift anything above the msbit off of the unsigned edge
647 value >>= shift_left + lsbit; // shift it back again down to the lsbit
648 // (including undoing any shift from above)
649 return value; // return our result
650 }
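// For example (illustrative values only): bit(0b1010, 1) == 1,
// bits(0xABCD, 11, 4) == 0xBC, and bits(0xABCD, 3, 0) == 0xD.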
651
uint32_t DNBArchMachARM64::NumSupportedHardwareWatchpoints() {
653 // Set the init value to something that will let us know that we need to
654 // autodetect how many watchpoints are supported dynamically...
655 static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
656 if (g_num_supported_hw_watchpoints == UINT_MAX) {
657 // Set this to zero in case we can't tell if there are any HW breakpoints
658 g_num_supported_hw_watchpoints = 0;
659
660 size_t len;
661 uint32_t n = 0;
662 len = sizeof(n);
663 if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) {
664 g_num_supported_hw_watchpoints = n;
665 DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
666 } else {
      // For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver
      // runs in EL0 so it can't access that register. The kernel should have
      // filled in the sysctls based on it, though.
671 #if defined(__arm__)
672 uint32_t register_DBGDIDR;
673
674 asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
675 uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
676 // Zero is reserved for the WRP count, so don't increment it if it is zero
677 if (numWRPs > 0)
678 numWRPs++;
679 g_num_supported_hw_watchpoints = numWRPs;
680 DNBLogThreadedIf(LOG_THREAD,
681 "Number of supported hw watchpoints via asm(): %d",
682 g_num_supported_hw_watchpoints);
683 #endif
684 }
685 }
686 return g_num_supported_hw_watchpoints;
687 }
688
uint32_t DNBArchMachARM64::NumSupportedHardwareBreakpoints() {
690 // Set the init value to something that will let us know that we need to
691 // autodetect how many breakpoints are supported dynamically...
692 static uint32_t g_num_supported_hw_breakpoints = UINT_MAX;
693 if (g_num_supported_hw_breakpoints == UINT_MAX) {
694 // Set this to zero in case we can't tell if there are any HW breakpoints
695 g_num_supported_hw_breakpoints = 0;
696
697 size_t len;
698 uint32_t n = 0;
699 len = sizeof(n);
700 if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0) {
701 g_num_supported_hw_breakpoints = n;
702 DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n);
703 } else {
      // For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver
      // runs in EL0 so it can't access that register. The kernel should have
      // filled in the sysctls based on it, though.
707 #if defined(__arm__)
708 uint32_t register_DBGDIDR;
709
710 asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numBRPs = bits(register_DBGDIDR, 27, 24);
      // Zero is reserved for the BRP count, so don't increment it if it is zero
      if (numBRPs > 0)
        numBRPs++;
      g_num_supported_hw_breakpoints = numBRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw breakpoints via asm(): %d",
718 g_num_supported_hw_breakpoints);
719 #endif
720 }
721 }
722 return g_num_supported_hw_breakpoints;
723 }
724
uint32_t DNBArchMachARM64::EnableHardwareBreakpoint(nub_addr_t addr,
                                                    nub_size_t size,
                                                    bool also_set_on_task) {
728 DNBLogThreadedIf(LOG_WATCHPOINTS,
729 "DNBArchMachARM64::EnableHardwareBreakpoint(addr = "
730 "0x%8.8llx, size = %zu)",
731 (uint64_t)addr, size);
732
733 const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();
734
735 nub_addr_t aligned_bp_address = addr;
736 uint32_t control_value = 0;
737
738 switch (size) {
739 case 2:
740 control_value = (0x3 << 5) | 7;
741 aligned_bp_address &= ~1;
742 break;
743 case 4:
744 control_value = (0xfu << 5) | 7;
745 aligned_bp_address &= ~3;
746 break;
747 };
748
749 // Read the debug state
750 kern_return_t kret = GetDBGState(false);
751 if (kret == KERN_SUCCESS) {
752 // Check to make sure we have the needed hardware support
753 uint32_t i = 0;
754
755 for (i = 0; i < num_hw_breakpoints; ++i) {
756 if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0)
757 break; // We found an available hw breakpoint slot (in i)
758 }
759
760 // See if we found an available hw breakpoint slot above
761 if (i < num_hw_breakpoints) {
762 m_state.dbg.__bvr[i] = aligned_bp_address;
763 m_state.dbg.__bcr[i] = control_value;
764
765 DNBLogThreadedIf(LOG_WATCHPOINTS,
766 "DNBArchMachARM64::EnableHardwareBreakpoint() "
767 "adding breakpoint on address 0x%llx with control "
768 "register value 0x%x",
769 (uint64_t)m_state.dbg.__bvr[i],
770 (uint32_t)m_state.dbg.__bcr[i]);
771
772 // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
773 // automatically, don't need to do it here.
774 kret = SetDBGState(also_set_on_task);
775
776 DNBLogThreadedIf(LOG_WATCHPOINTS,
777 "DNBArchMachARM64::"
778 "EnableHardwareBreakpoint() "
779 "SetDBGState() => 0x%8.8x.",
780 kret);
781
782 if (kret == KERN_SUCCESS)
783 return i;
784 } else {
785 DNBLogThreadedIf(LOG_WATCHPOINTS,
786 "DNBArchMachARM64::"
787 "EnableHardwareBreakpoint(): All "
788 "hardware resources (%u) are in use.",
789 num_hw_breakpoints);
790 }
791 }
792 return INVALID_NUB_HW_INDEX;
793 }
794
uint32_t DNBArchMachARM64::EnableHardwareWatchpoint(nub_addr_t addr,
                                                    nub_size_t size, bool read,
                                                    bool write,
                                                    bool also_set_on_task) {
799 DNBLogThreadedIf(LOG_WATCHPOINTS,
800 "DNBArchMachARM64::EnableHardwareWatchpoint(addr = "
801 "0x%8.8llx, size = %zu, read = %u, write = %u)",
802 (uint64_t)addr, size, read, write);
803
804 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
805
806 // Can't watch zero bytes
807 if (size == 0)
808 return INVALID_NUB_HW_INDEX;
809
810 // We must watch for either read or write
811 if (read == false && write == false)
812 return INVALID_NUB_HW_INDEX;
813
814 // Otherwise, can't watch more than 8 bytes per WVR/WCR pair
815 if (size > 8)
816 return INVALID_NUB_HW_INDEX;
817
  // AArch64 watchpoints are in one of two forms: (1) 1-8 bytes, aligned to
819 // an 8 byte address, or (2) a power-of-two size region of memory; minimum
820 // 8 bytes, maximum 2GB; the starting address must be aligned to that power
821 // of two.
822 //
823 // For (1), 1-8 byte watchpoints, using the Byte Address Selector field in
824 // DBGWCR<n>.BAS. Any of the bytes may be watched, but if multiple bytes
825 // are watched, the bytes selected must be contiguous. The start address
826 // watched must be doubleword (8-byte) aligned; if the start address is
827 // word (4-byte) aligned, only 4 bytes can be watched.
828 //
829 // For (2), the MASK field in DBGWCR<n>.MASK is used.
830 //
831 // See the ARM ARM, section "Watchpoint exceptions", and more specifically,
832 // "Watchpoint data address comparisons".
833 //
834 // debugserver today only supports (1) - the Byte Address Selector 1-8 byte
835 // watchpoints that are 8-byte aligned. To support larger watchpoints,
836 // debugserver would need to interpret the mach exception when the watched
837 // region was hit, see if the address accessed lies within the subset
838 // of the power-of-two region that lldb asked us to watch (v. ARM ARM,
839 // "Determining the memory location that caused a Watchpoint exception"),
840 // and silently resume the inferior (disable watchpoint, stepi, re-enable
841 // watchpoint) if the address lies outside the region that lldb asked us
842 // to watch.
843 //
844 // Alternatively, lldb would need to be prepared for a larger region
845 // being watched than it requested, and silently resume the inferior if
846 // the accessed address is outside the region lldb wants to watch.
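  //
  // For example (addresses hypothetical): under form (1), watching 4 bytes at
  // 0x1004 programs DBGWVR = 0x1000 with BAS = 0b11110000; under form (2), a
  // MASK value of 4 would instead cover the whole naturally aligned 16-byte
  // region containing the access.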
847
848 nub_addr_t aligned_wp_address = addr & ~0x7;
849 uint32_t addr_dword_offset = addr & 0x7;
850
851 // Do we need to split up this logical watchpoint into two hardware watchpoint
852 // registers?
  // e.g. a watchpoint of length 4 on address 6. We need to do this with
  //   one watchpoint on address 0 with bytes 6 & 7 being monitored
  //   one watchpoint on address 8 with bytes 0 & 1 being monitored
856
857 if (addr_dword_offset + size > 8) {
858 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
859 "EnableHardwareWatchpoint(addr = "
860 "0x%8.8llx, size = %zu) needs two "
861 "hardware watchpoints slots to monitor",
862 (uint64_t)addr, size);
863 int low_watchpoint_size = 8 - addr_dword_offset;
864 int high_watchpoint_size = addr_dword_offset + size - 8;
865
866 uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read,
867 write, also_set_on_task);
868 if (lo == INVALID_NUB_HW_INDEX)
869 return INVALID_NUB_HW_INDEX;
870 uint32_t hi =
871 EnableHardwareWatchpoint(aligned_wp_address + 8, high_watchpoint_size,
872 read, write, also_set_on_task);
873 if (hi == INVALID_NUB_HW_INDEX) {
874 DisableHardwareWatchpoint(lo, also_set_on_task);
875 return INVALID_NUB_HW_INDEX;
876 }
877 // Tag this lo->hi mapping in our database.
878 LoHi[lo] = hi;
879 return lo;
880 }
881
882 // At this point
883 // 1 aligned_wp_address is the requested address rounded down to 8-byte
884 // alignment
885 // 2 addr_dword_offset is the offset into that double word (8-byte) region
886 // that we are watching
887 // 3 size is the number of bytes within that 8-byte region that we are
888 // watching
889
  // Set the Byte Address Select bits DBGWCRn_EL1 bits [12:5] based on the
  // above.
  // The shift-and-subtract operation will give us 0b11 for 2, 0b1111 for 4,
  // etc, up to 0b11111111 for 8.
894 // then we shift those bits left by the offset into this dword that we are
895 // interested in.
896 // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of
897 // 0b11110000.
898 uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset;
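  // e.g. size = 4 at addr_dword_offset = 4 gives ((1 << 4) - 1) << 4 = 0xf0,
  // i.e. BAS = 0b11110000, selecting bytes 4-7 of the doubleword.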
899
900 // Read the debug state
901 kern_return_t kret = GetDBGState(false);
902
903 if (kret == KERN_SUCCESS) {
904 // Check to make sure we have the needed hardware support
905 uint32_t i = 0;
906
907 for (i = 0; i < num_hw_watchpoints; ++i) {
908 if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
909 break; // We found an available hw watchpoint slot (in i)
910 }
911
912 // See if we found an available hw watchpoint slot above
913 if (i < num_hw_watchpoints) {
914 // DumpDBGState(m_state.dbg);
915
916 // Clear any previous LoHi joined-watchpoint that may have been in use
917 LoHi[i] = 0;
918
919 // shift our Byte Address Select bits up to the correct bit range for the
920 // DBGWCRn_EL1
921 byte_address_select = byte_address_select << 5;
922
923 // Make sure bits 1:0 are clear in our address
924 m_state.dbg.__wvr[i] = aligned_wp_address; // DVA (Data Virtual Address)
925 m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow
926 // the DVA that we will watch
927 S_USER | // Stop only in user mode
928 (read ? WCR_LOAD : 0) | // Stop on read access?
929 (write ? WCR_STORE : 0) | // Stop on write access?
930 WCR_ENABLE; // Enable this watchpoint;
931
932 DNBLogThreadedIf(
933 LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint() "
934 "adding watchpoint on address 0x%llx with control "
935 "register value 0x%x",
936 (uint64_t)m_state.dbg.__wvr[i], (uint32_t)m_state.dbg.__wcr[i]);
937
938 // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
939 // automatically, don't need to do it here.
940
941 kret = SetDBGState(also_set_on_task);
942 // DumpDBGState(m_state.dbg);
943
944 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
945 "EnableHardwareWatchpoint() "
946 "SetDBGState() => 0x%8.8x.",
947 kret);
948
949 if (kret == KERN_SUCCESS)
950 return i;
951 } else {
952 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
953 "EnableHardwareWatchpoint(): All "
954 "hardware resources (%u) are in use.",
955 num_hw_watchpoints);
956 }
957 }
958 return INVALID_NUB_HW_INDEX;
959 }
960
bool DNBArchMachARM64::ReenableHardwareWatchpoint(uint32_t hw_index) {
962 // If this logical watchpoint # is actually implemented using
963 // two hardware watchpoint registers, re-enable both of them.
964
965 if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
966 return ReenableHardwareWatchpoint_helper(hw_index) &&
967 ReenableHardwareWatchpoint_helper(LoHi[hw_index]);
968 } else {
969 return ReenableHardwareWatchpoint_helper(hw_index);
970 }
971 }
972
bool DNBArchMachARM64::ReenableHardwareWatchpoint_helper(uint32_t hw_index) {
974 kern_return_t kret = GetDBGState(false);
975 if (kret != KERN_SUCCESS)
976 return false;
977
978 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
979 if (hw_index >= num_hw_points)
980 return false;
981
982 m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
983 m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;
984
985 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
986 "EnableHardwareWatchpoint( %u ) - WVR%u = "
987 "0x%8.8llx WCR%u = 0x%8.8llx",
988 hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
989 hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);
990
991 // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
992 // automatically, don't need to do it here.
993
994 kret = SetDBGState(false);
995
996 return (kret == KERN_SUCCESS);
997 }
998
bool DNBArchMachARM64::DisableHardwareWatchpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
1001 if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
1002 return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task) &&
1003 DisableHardwareWatchpoint_helper(LoHi[hw_index], also_set_on_task);
1004 } else {
1005 return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task);
1006 }
1007 }
1008
bool DNBArchMachARM64::DisableHardwareWatchpoint_helper(uint32_t hw_index,
                                                        bool also_set_on_task) {
1011 kern_return_t kret = GetDBGState(false);
1012 if (kret != KERN_SUCCESS)
1013 return false;
1014
1015 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
1016 if (hw_index >= num_hw_points)
1017 return false;
1018
1019 m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
1020 m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];
1021
1022 m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
1023 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
1024 "DisableHardwareWatchpoint( %u ) - WVR%u = "
1025 "0x%8.8llx WCR%u = 0x%8.8llx",
1026 hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
1027 hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);
1028
1029 kret = SetDBGState(also_set_on_task);
1030
1031 return (kret == KERN_SUCCESS);
1032 }
1033
bool DNBArchMachARM64::DisableHardwareBreakpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
1036 kern_return_t kret = GetDBGState(false);
1037 if (kret != KERN_SUCCESS)
1038 return false;
1039
1040 const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
1041 if (hw_index >= num_hw_points)
1042 return false;
1043
1044 m_disabled_breakpoints[hw_index].addr = m_state.dbg.__bvr[hw_index];
1045 m_disabled_breakpoints[hw_index].control = m_state.dbg.__bcr[hw_index];
1046
1047 m_state.dbg.__bcr[hw_index] = 0;
1048 DNBLogThreadedIf(LOG_WATCHPOINTS,
1049 "DNBArchMachARM64::"
1050 "DisableHardwareBreakpoint( %u ) - WVR%u = "
1051 "0x%8.8llx BCR%u = 0x%8.8llx",
1052 hw_index, hw_index, (uint64_t)m_state.dbg.__bvr[hw_index],
1053 hw_index, (uint64_t)m_state.dbg.__bcr[hw_index]);
1054
1055 kret = SetDBGState(also_set_on_task);
1056
1057 return (kret == KERN_SUCCESS);
1058 }
1059
// This is for checking the Byte Address Select bits in the DBGWCRn_EL1 control
// register.
1062 // Returns -1 if the trailing bit patterns are not one of:
1063 // { 0b???????1, 0b??????10, 0b?????100, 0b????1000, 0b???10000, 0b??100000,
1064 // 0b?1000000, 0b10000000 }.
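// e.g. LowestBitSet(0b00110000) == 4, LowestBitSet(0) == -1.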
static inline int32_t LowestBitSet(uint32_t val) {
1066 for (unsigned i = 0; i < 8; ++i) {
1067 if (bit(val, i))
1068 return i;
1069 }
1070 return -1;
1071 }
1072
// Iterate through the debug registers; return the index of the first
// watchpoint whose address matches. As a side effect, 'addr' (an in/out
// argument) is updated to the watchpoint's starting address as understood by
// the debugger, which may differ from the address that was passed in.
uint32_t DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr) {
1079 // Read the debug state
1080 kern_return_t kret = GetDBGState(true);
1081 // DumpDBGState(m_state.dbg);
1082 DNBLogThreadedIf(
1083 LOG_WATCHPOINTS,
1084 "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
1085 kret);
1086 DNBLogThreadedIf(LOG_WATCHPOINTS,
1087 "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx",
1088 (uint64_t)addr);
1089
1090 if (kret == KERN_SUCCESS) {
1091 DBG &debug_state = m_state.dbg;
1092 uint32_t i, num = NumSupportedHardwareWatchpoints();
1093 for (i = 0; i < num; ++i) {
1094 nub_addr_t wp_addr = GetWatchAddress(debug_state, i);
1095 uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5);
1096
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
1098 "GetHardwareWatchpointHit() slot: %u "
1099 "(addr = 0x%llx; byte_mask = 0x%x)",
1100 i, static_cast<uint64_t>(wp_addr),
1101 byte_mask);
1102
1103 if (!IsWatchpointEnabled(debug_state, i))
1104 continue;
1105
1106 if (bits(wp_addr, 48, 3) != bits(addr, 48, 3))
1107 continue;
1108
1109 // Sanity check the byte_mask
      int32_t lsb = LowestBitSet(byte_mask);
1111 if (lsb < 0)
1112 continue;
1113
1114 uint64_t byte_to_match = bits(addr, 2, 0);
1115
1116 if (byte_mask & (1 << byte_to_match)) {
1117 addr = wp_addr + lsb;
1118 return i;
1119 }
1120 }
1121 }
1122 return INVALID_NUB_HW_INDEX;
1123 }
1124
nub_addr_t DNBArchMachARM64::GetWatchpointAddressByIndex(uint32_t hw_index) {
1126 kern_return_t kret = GetDBGState(true);
1127 if (kret != KERN_SUCCESS)
1128 return INVALID_NUB_ADDRESS;
1129 const uint32_t num = NumSupportedHardwareWatchpoints();
1130 if (hw_index >= num)
1131 return INVALID_NUB_ADDRESS;
1132 if (IsWatchpointEnabled(m_state.dbg, hw_index))
1133 return GetWatchAddress(m_state.dbg, hw_index);
1134 return INVALID_NUB_ADDRESS;
1135 }
1136
bool DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state,
                                           uint32_t hw_index) {
1139 // Watchpoint Control Registers, bitfield definitions
1140 // ...
1141 // Bits Value Description
1142 // [0] 0 Watchpoint disabled
1143 // 1 Watchpoint enabled.
1144 return (debug_state.__wcr[hw_index] & 1u);
1145 }
1146
nub_addr_t DNBArchMachARM64::GetWatchAddress(const DBG &debug_state,
                                             uint32_t hw_index) {
  // Watchpoint Value Registers, bitfield definitions
  //   Bits        Description
  //   [63:2]      Watchpoint address
  //   [1:0]       RES0
1153 return bits(debug_state.__wvr[hw_index], 63, 0);
1154 }
1155
1156 // Register information definitions for 64 bit ARMv8.
1157 enum gpr_regnums {
1158 gpr_x0 = 0,
1159 gpr_x1,
1160 gpr_x2,
1161 gpr_x3,
1162 gpr_x4,
1163 gpr_x5,
1164 gpr_x6,
1165 gpr_x7,
1166 gpr_x8,
1167 gpr_x9,
1168 gpr_x10,
1169 gpr_x11,
1170 gpr_x12,
1171 gpr_x13,
1172 gpr_x14,
1173 gpr_x15,
1174 gpr_x16,
1175 gpr_x17,
1176 gpr_x18,
1177 gpr_x19,
1178 gpr_x20,
1179 gpr_x21,
1180 gpr_x22,
1181 gpr_x23,
1182 gpr_x24,
1183 gpr_x25,
1184 gpr_x26,
1185 gpr_x27,
1186 gpr_x28,
1187 gpr_fp,
1188 gpr_x29 = gpr_fp,
1189 gpr_lr,
1190 gpr_x30 = gpr_lr,
1191 gpr_sp,
1192 gpr_x31 = gpr_sp,
1193 gpr_pc,
1194 gpr_cpsr,
1195 gpr_w0,
1196 gpr_w1,
1197 gpr_w2,
1198 gpr_w3,
1199 gpr_w4,
1200 gpr_w5,
1201 gpr_w6,
1202 gpr_w7,
1203 gpr_w8,
1204 gpr_w9,
1205 gpr_w10,
1206 gpr_w11,
1207 gpr_w12,
1208 gpr_w13,
1209 gpr_w14,
1210 gpr_w15,
1211 gpr_w16,
1212 gpr_w17,
1213 gpr_w18,
1214 gpr_w19,
1215 gpr_w20,
1216 gpr_w21,
1217 gpr_w22,
1218 gpr_w23,
1219 gpr_w24,
1220 gpr_w25,
1221 gpr_w26,
1222 gpr_w27,
1223 gpr_w28
1224
1225 };
1226
1227 enum {
1228 vfp_v0 = 0,
1229 vfp_v1,
1230 vfp_v2,
1231 vfp_v3,
1232 vfp_v4,
1233 vfp_v5,
1234 vfp_v6,
1235 vfp_v7,
1236 vfp_v8,
1237 vfp_v9,
1238 vfp_v10,
1239 vfp_v11,
1240 vfp_v12,
1241 vfp_v13,
1242 vfp_v14,
1243 vfp_v15,
1244 vfp_v16,
1245 vfp_v17,
1246 vfp_v18,
1247 vfp_v19,
1248 vfp_v20,
1249 vfp_v21,
1250 vfp_v22,
1251 vfp_v23,
1252 vfp_v24,
1253 vfp_v25,
1254 vfp_v26,
1255 vfp_v27,
1256 vfp_v28,
1257 vfp_v29,
1258 vfp_v30,
1259 vfp_v31,
1260 vfp_fpsr,
1261 vfp_fpcr,
1262
1263 // lower 32 bits of the corresponding vfp_v<n> reg.
1264 vfp_s0,
1265 vfp_s1,
1266 vfp_s2,
1267 vfp_s3,
1268 vfp_s4,
1269 vfp_s5,
1270 vfp_s6,
1271 vfp_s7,
1272 vfp_s8,
1273 vfp_s9,
1274 vfp_s10,
1275 vfp_s11,
1276 vfp_s12,
1277 vfp_s13,
1278 vfp_s14,
1279 vfp_s15,
1280 vfp_s16,
1281 vfp_s17,
1282 vfp_s18,
1283 vfp_s19,
1284 vfp_s20,
1285 vfp_s21,
1286 vfp_s22,
1287 vfp_s23,
1288 vfp_s24,
1289 vfp_s25,
1290 vfp_s26,
1291 vfp_s27,
1292 vfp_s28,
1293 vfp_s29,
1294 vfp_s30,
1295 vfp_s31,
1296
1297 // lower 64 bits of the corresponding vfp_v<n> reg.
1298 vfp_d0,
1299 vfp_d1,
1300 vfp_d2,
1301 vfp_d3,
1302 vfp_d4,
1303 vfp_d5,
1304 vfp_d6,
1305 vfp_d7,
1306 vfp_d8,
1307 vfp_d9,
1308 vfp_d10,
1309 vfp_d11,
1310 vfp_d12,
1311 vfp_d13,
1312 vfp_d14,
1313 vfp_d15,
1314 vfp_d16,
1315 vfp_d17,
1316 vfp_d18,
1317 vfp_d19,
1318 vfp_d20,
1319 vfp_d21,
1320 vfp_d22,
1321 vfp_d23,
1322 vfp_d24,
1323 vfp_d25,
1324 vfp_d26,
1325 vfp_d27,
1326 vfp_d28,
1327 vfp_d29,
1328 vfp_d30,
1329 vfp_d31
1330 };
1331
1332 enum { exc_far = 0, exc_esr, exc_exception };
1333
1334 // These numbers from the "DWARF for the ARM 64-bit Architecture (AArch64)"
1335 // document.
1336
1337 enum {
1338 dwarf_x0 = 0,
1339 dwarf_x1,
1340 dwarf_x2,
1341 dwarf_x3,
1342 dwarf_x4,
1343 dwarf_x5,
1344 dwarf_x6,
1345 dwarf_x7,
1346 dwarf_x8,
1347 dwarf_x9,
1348 dwarf_x10,
1349 dwarf_x11,
1350 dwarf_x12,
1351 dwarf_x13,
1352 dwarf_x14,
1353 dwarf_x15,
1354 dwarf_x16,
1355 dwarf_x17,
1356 dwarf_x18,
1357 dwarf_x19,
1358 dwarf_x20,
1359 dwarf_x21,
1360 dwarf_x22,
1361 dwarf_x23,
1362 dwarf_x24,
1363 dwarf_x25,
1364 dwarf_x26,
1365 dwarf_x27,
1366 dwarf_x28,
1367 dwarf_x29,
1368 dwarf_x30,
1369 dwarf_x31,
1370 dwarf_pc = 32,
1371 dwarf_elr_mode = 33,
1372 dwarf_fp = dwarf_x29,
1373 dwarf_lr = dwarf_x30,
1374 dwarf_sp = dwarf_x31,
1375 // 34-63 reserved
1376
1377 // V0-V31 (128 bit vector registers)
1378 dwarf_v0 = 64,
1379 dwarf_v1,
1380 dwarf_v2,
1381 dwarf_v3,
1382 dwarf_v4,
1383 dwarf_v5,
1384 dwarf_v6,
1385 dwarf_v7,
1386 dwarf_v8,
1387 dwarf_v9,
1388 dwarf_v10,
1389 dwarf_v11,
1390 dwarf_v12,
1391 dwarf_v13,
1392 dwarf_v14,
1393 dwarf_v15,
1394 dwarf_v16,
1395 dwarf_v17,
1396 dwarf_v18,
1397 dwarf_v19,
1398 dwarf_v20,
1399 dwarf_v21,
1400 dwarf_v22,
1401 dwarf_v23,
1402 dwarf_v24,
1403 dwarf_v25,
1404 dwarf_v26,
1405 dwarf_v27,
1406 dwarf_v28,
1407 dwarf_v29,
1408 dwarf_v30,
1409 dwarf_v31
1410
1411 // 96-127 reserved
1412 };
1413
1414 enum {
1415 debugserver_gpr_x0 = 0,
1416 debugserver_gpr_x1,
1417 debugserver_gpr_x2,
1418 debugserver_gpr_x3,
1419 debugserver_gpr_x4,
1420 debugserver_gpr_x5,
1421 debugserver_gpr_x6,
1422 debugserver_gpr_x7,
1423 debugserver_gpr_x8,
1424 debugserver_gpr_x9,
1425 debugserver_gpr_x10,
1426 debugserver_gpr_x11,
1427 debugserver_gpr_x12,
1428 debugserver_gpr_x13,
1429 debugserver_gpr_x14,
1430 debugserver_gpr_x15,
1431 debugserver_gpr_x16,
1432 debugserver_gpr_x17,
1433 debugserver_gpr_x18,
1434 debugserver_gpr_x19,
1435 debugserver_gpr_x20,
1436 debugserver_gpr_x21,
1437 debugserver_gpr_x22,
1438 debugserver_gpr_x23,
1439 debugserver_gpr_x24,
1440 debugserver_gpr_x25,
1441 debugserver_gpr_x26,
1442 debugserver_gpr_x27,
1443 debugserver_gpr_x28,
1444 debugserver_gpr_fp, // x29
1445 debugserver_gpr_lr, // x30
1446 debugserver_gpr_sp, // sp aka xsp
1447 debugserver_gpr_pc,
1448 debugserver_gpr_cpsr,
1449 debugserver_vfp_v0,
1450 debugserver_vfp_v1,
1451 debugserver_vfp_v2,
1452 debugserver_vfp_v3,
1453 debugserver_vfp_v4,
1454 debugserver_vfp_v5,
1455 debugserver_vfp_v6,
1456 debugserver_vfp_v7,
1457 debugserver_vfp_v8,
1458 debugserver_vfp_v9,
1459 debugserver_vfp_v10,
1460 debugserver_vfp_v11,
1461 debugserver_vfp_v12,
1462 debugserver_vfp_v13,
1463 debugserver_vfp_v14,
1464 debugserver_vfp_v15,
1465 debugserver_vfp_v16,
1466 debugserver_vfp_v17,
1467 debugserver_vfp_v18,
1468 debugserver_vfp_v19,
1469 debugserver_vfp_v20,
1470 debugserver_vfp_v21,
1471 debugserver_vfp_v22,
1472 debugserver_vfp_v23,
1473 debugserver_vfp_v24,
1474 debugserver_vfp_v25,
1475 debugserver_vfp_v26,
1476 debugserver_vfp_v27,
1477 debugserver_vfp_v28,
1478 debugserver_vfp_v29,
1479 debugserver_vfp_v30,
1480 debugserver_vfp_v31,
1481 debugserver_vfp_fpsr,
1482 debugserver_vfp_fpcr
1483 };
1484
1485 const char *g_contained_x0[]{"x0", NULL};
1486 const char *g_contained_x1[]{"x1", NULL};
1487 const char *g_contained_x2[]{"x2", NULL};
1488 const char *g_contained_x3[]{"x3", NULL};
1489 const char *g_contained_x4[]{"x4", NULL};
1490 const char *g_contained_x5[]{"x5", NULL};
1491 const char *g_contained_x6[]{"x6", NULL};
1492 const char *g_contained_x7[]{"x7", NULL};
1493 const char *g_contained_x8[]{"x8", NULL};
1494 const char *g_contained_x9[]{"x9", NULL};
1495 const char *g_contained_x10[]{"x10", NULL};
1496 const char *g_contained_x11[]{"x11", NULL};
1497 const char *g_contained_x12[]{"x12", NULL};
1498 const char *g_contained_x13[]{"x13", NULL};
1499 const char *g_contained_x14[]{"x14", NULL};
1500 const char *g_contained_x15[]{"x15", NULL};
1501 const char *g_contained_x16[]{"x16", NULL};
1502 const char *g_contained_x17[]{"x17", NULL};
1503 const char *g_contained_x18[]{"x18", NULL};
1504 const char *g_contained_x19[]{"x19", NULL};
1505 const char *g_contained_x20[]{"x20", NULL};
1506 const char *g_contained_x21[]{"x21", NULL};
1507 const char *g_contained_x22[]{"x22", NULL};
1508 const char *g_contained_x23[]{"x23", NULL};
1509 const char *g_contained_x24[]{"x24", NULL};
1510 const char *g_contained_x25[]{"x25", NULL};
1511 const char *g_contained_x26[]{"x26", NULL};
1512 const char *g_contained_x27[]{"x27", NULL};
1513 const char *g_contained_x28[]{"x28", NULL};
1514
1515 const char *g_invalidate_x0[]{"x0", "w0", NULL};
1516 const char *g_invalidate_x1[]{"x1", "w1", NULL};
1517 const char *g_invalidate_x2[]{"x2", "w2", NULL};
1518 const char *g_invalidate_x3[]{"x3", "w3", NULL};
1519 const char *g_invalidate_x4[]{"x4", "w4", NULL};
1520 const char *g_invalidate_x5[]{"x5", "w5", NULL};
1521 const char *g_invalidate_x6[]{"x6", "w6", NULL};
1522 const char *g_invalidate_x7[]{"x7", "w7", NULL};
1523 const char *g_invalidate_x8[]{"x8", "w8", NULL};
1524 const char *g_invalidate_x9[]{"x9", "w9", NULL};
1525 const char *g_invalidate_x10[]{"x10", "w10", NULL};
1526 const char *g_invalidate_x11[]{"x11", "w11", NULL};
1527 const char *g_invalidate_x12[]{"x12", "w12", NULL};
1528 const char *g_invalidate_x13[]{"x13", "w13", NULL};
1529 const char *g_invalidate_x14[]{"x14", "w14", NULL};
1530 const char *g_invalidate_x15[]{"x15", "w15", NULL};
1531 const char *g_invalidate_x16[]{"x16", "w16", NULL};
1532 const char *g_invalidate_x17[]{"x17", "w17", NULL};
1533 const char *g_invalidate_x18[]{"x18", "w18", NULL};
1534 const char *g_invalidate_x19[]{"x19", "w19", NULL};
1535 const char *g_invalidate_x20[]{"x20", "w20", NULL};
1536 const char *g_invalidate_x21[]{"x21", "w21", NULL};
1537 const char *g_invalidate_x22[]{"x22", "w22", NULL};
1538 const char *g_invalidate_x23[]{"x23", "w23", NULL};
1539 const char *g_invalidate_x24[]{"x24", "w24", NULL};
1540 const char *g_invalidate_x25[]{"x25", "w25", NULL};
1541 const char *g_invalidate_x26[]{"x26", "w26", NULL};
1542 const char *g_invalidate_x27[]{"x27", "w27", NULL};
1543 const char *g_invalidate_x28[]{"x28", "w28", NULL};
1544
1545 #define GPR_OFFSET_IDX(idx) (offsetof(DNBArchMachARM64::GPR, __x[idx]))
1546
1547 #define GPR_OFFSET_NAME(reg) (offsetof(DNBArchMachARM64::GPR, __##reg))
1548
1549 // These macros will auto define the register name, alt name, register size,
1550 // register offset, encoding, format and native register. This ensures that
1551 // the register state structures are defined correctly and have the correct
1552 // sizes and offsets.
1553 #define DEFINE_GPR_IDX(idx, reg, alt, gen) \
1554 { \
1555 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx), \
1556 dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, \
1557 g_invalidate_x##idx \
1558 }
1559 #define DEFINE_GPR_NAME(reg, alt, gen) \
1560 { \
1561 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg), \
1562 dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL \
1563 }
1564 #define DEFINE_PSEUDO_GPR_IDX(idx, reg) \
1565 { \
1566 e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM, \
1567 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1568 g_contained_x##idx, g_invalidate_x##idx \
1569 }
1570
1571 //_STRUCT_ARM_THREAD_STATE64
1572 //{
1573 // uint64_t x[29]; /* General purpose registers x0-x28 */
1574 // uint64_t fp; /* Frame pointer x29 */
1575 // uint64_t lr; /* Link register x30 */
1576 // uint64_t sp; /* Stack pointer x31 */
1577 // uint64_t pc; /* Program counter */
1578 // uint32_t cpsr; /* Current program status register */
1579 //};
1580
1581 // General purpose registers
1582 const DNBRegisterInfo DNBArchMachARM64::g_gpr_registers[] = {
1583 DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1),
1584 DEFINE_GPR_IDX(1, x1, "arg2", GENERIC_REGNUM_ARG2),
1585 DEFINE_GPR_IDX(2, x2, "arg3", GENERIC_REGNUM_ARG3),
1586 DEFINE_GPR_IDX(3, x3, "arg4", GENERIC_REGNUM_ARG4),
1587 DEFINE_GPR_IDX(4, x4, "arg5", GENERIC_REGNUM_ARG5),
1588 DEFINE_GPR_IDX(5, x5, "arg6", GENERIC_REGNUM_ARG6),
1589 DEFINE_GPR_IDX(6, x6, "arg7", GENERIC_REGNUM_ARG7),
1590 DEFINE_GPR_IDX(7, x7, "arg8", GENERIC_REGNUM_ARG8),
1591 DEFINE_GPR_IDX(8, x8, NULL, INVALID_NUB_REGNUM),
1592 DEFINE_GPR_IDX(9, x9, NULL, INVALID_NUB_REGNUM),
1593 DEFINE_GPR_IDX(10, x10, NULL, INVALID_NUB_REGNUM),
1594 DEFINE_GPR_IDX(11, x11, NULL, INVALID_NUB_REGNUM),
1595 DEFINE_GPR_IDX(12, x12, NULL, INVALID_NUB_REGNUM),
1596 DEFINE_GPR_IDX(13, x13, NULL, INVALID_NUB_REGNUM),
1597 DEFINE_GPR_IDX(14, x14, NULL, INVALID_NUB_REGNUM),
1598 DEFINE_GPR_IDX(15, x15, NULL, INVALID_NUB_REGNUM),
1599 DEFINE_GPR_IDX(16, x16, NULL, INVALID_NUB_REGNUM),
1600 DEFINE_GPR_IDX(17, x17, NULL, INVALID_NUB_REGNUM),
1601 DEFINE_GPR_IDX(18, x18, NULL, INVALID_NUB_REGNUM),
1602 DEFINE_GPR_IDX(19, x19, NULL, INVALID_NUB_REGNUM),
1603 DEFINE_GPR_IDX(20, x20, NULL, INVALID_NUB_REGNUM),
1604 DEFINE_GPR_IDX(21, x21, NULL, INVALID_NUB_REGNUM),
1605 DEFINE_GPR_IDX(22, x22, NULL, INVALID_NUB_REGNUM),
1606 DEFINE_GPR_IDX(23, x23, NULL, INVALID_NUB_REGNUM),
1607 DEFINE_GPR_IDX(24, x24, NULL, INVALID_NUB_REGNUM),
1608 DEFINE_GPR_IDX(25, x25, NULL, INVALID_NUB_REGNUM),
1609 DEFINE_GPR_IDX(26, x26, NULL, INVALID_NUB_REGNUM),
1610 DEFINE_GPR_IDX(27, x27, NULL, INVALID_NUB_REGNUM),
1611 DEFINE_GPR_IDX(28, x28, NULL, INVALID_NUB_REGNUM),
1612 // For the G/g packet we want to show where the offset into the regctx
1613 // is for fp/lr/sp/pc, but we cannot directly access them on arm64e
1614 // devices (and therefore can't offsetof() them)) - add the offset based
1615 // on the last accessible register by hand for advertising the location
1616 // in the regctx to lldb. We'll go through the accessor functions when
1617 // we read/write them here.
    {
        e_regSetGPR, gpr_fp, "fp", "x29", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 8,
        dwarf_fp, dwarf_fp, GENERIC_REGNUM_FP, debugserver_gpr_fp, NULL, NULL
    },
    {
        e_regSetGPR, gpr_lr, "lr", "x30", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 16,
        dwarf_lr, dwarf_lr, GENERIC_REGNUM_RA, debugserver_gpr_lr, NULL, NULL
    },
    {
        e_regSetGPR, gpr_sp, "sp", "xsp", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 24,
        dwarf_sp, dwarf_sp, GENERIC_REGNUM_SP, debugserver_gpr_sp, NULL, NULL
    },
    {
        e_regSetGPR, gpr_pc, "pc", NULL, Uint, Hex, 8, GPR_OFFSET_IDX(28) + 32,
        dwarf_pc, dwarf_pc, GENERIC_REGNUM_PC, debugserver_gpr_pc, NULL, NULL
    },

    // In armv7 we specify that writing to the CPSR should invalidate r8-r12,
    // sp, and lr. This should be specified for arm64 too, even though
    // debugserver is only used for userland debugging.
    {e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4,
     GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, INVALID_NUB_REGNUM,
     debugserver_gpr_cpsr, NULL, NULL},

    DEFINE_PSEUDO_GPR_IDX(0, w0),
    DEFINE_PSEUDO_GPR_IDX(1, w1),
    DEFINE_PSEUDO_GPR_IDX(2, w2),
    DEFINE_PSEUDO_GPR_IDX(3, w3),
    DEFINE_PSEUDO_GPR_IDX(4, w4),
    DEFINE_PSEUDO_GPR_IDX(5, w5),
    DEFINE_PSEUDO_GPR_IDX(6, w6),
    DEFINE_PSEUDO_GPR_IDX(7, w7),
    DEFINE_PSEUDO_GPR_IDX(8, w8),
    DEFINE_PSEUDO_GPR_IDX(9, w9),
    DEFINE_PSEUDO_GPR_IDX(10, w10),
    DEFINE_PSEUDO_GPR_IDX(11, w11),
    DEFINE_PSEUDO_GPR_IDX(12, w12),
    DEFINE_PSEUDO_GPR_IDX(13, w13),
    DEFINE_PSEUDO_GPR_IDX(14, w14),
    DEFINE_PSEUDO_GPR_IDX(15, w15),
    DEFINE_PSEUDO_GPR_IDX(16, w16),
    DEFINE_PSEUDO_GPR_IDX(17, w17),
    DEFINE_PSEUDO_GPR_IDX(18, w18),
    DEFINE_PSEUDO_GPR_IDX(19, w19),
    DEFINE_PSEUDO_GPR_IDX(20, w20),
    DEFINE_PSEUDO_GPR_IDX(21, w21),
    DEFINE_PSEUDO_GPR_IDX(22, w22),
    DEFINE_PSEUDO_GPR_IDX(23, w23),
    DEFINE_PSEUDO_GPR_IDX(24, w24),
    DEFINE_PSEUDO_GPR_IDX(25, w25),
    DEFINE_PSEUDO_GPR_IDX(26, w26),
    DEFINE_PSEUDO_GPR_IDX(27, w27),
    DEFINE_PSEUDO_GPR_IDX(28, w28)};

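// Containment and invalidation tables for the vector registers. Each sN/dN
// pseudo register defined below names, via one of these NULL-terminated
// g_contained_vN lists, the vN register it lives inside, and writing any
// register that overlaps vN invalidates the cached vN/dN/sN values listed in
// the matching g_invalidate_vN list.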
const char *g_contained_v0[]{"v0", NULL};
const char *g_contained_v1[]{"v1", NULL};
const char *g_contained_v2[]{"v2", NULL};
const char *g_contained_v3[]{"v3", NULL};
const char *g_contained_v4[]{"v4", NULL};
const char *g_contained_v5[]{"v5", NULL};
const char *g_contained_v6[]{"v6", NULL};
const char *g_contained_v7[]{"v7", NULL};
const char *g_contained_v8[]{"v8", NULL};
const char *g_contained_v9[]{"v9", NULL};
const char *g_contained_v10[]{"v10", NULL};
const char *g_contained_v11[]{"v11", NULL};
const char *g_contained_v12[]{"v12", NULL};
const char *g_contained_v13[]{"v13", NULL};
const char *g_contained_v14[]{"v14", NULL};
const char *g_contained_v15[]{"v15", NULL};
const char *g_contained_v16[]{"v16", NULL};
const char *g_contained_v17[]{"v17", NULL};
const char *g_contained_v18[]{"v18", NULL};
const char *g_contained_v19[]{"v19", NULL};
const char *g_contained_v20[]{"v20", NULL};
const char *g_contained_v21[]{"v21", NULL};
const char *g_contained_v22[]{"v22", NULL};
const char *g_contained_v23[]{"v23", NULL};
const char *g_contained_v24[]{"v24", NULL};
const char *g_contained_v25[]{"v25", NULL};
const char *g_contained_v26[]{"v26", NULL};
const char *g_contained_v27[]{"v27", NULL};
const char *g_contained_v28[]{"v28", NULL};
const char *g_contained_v29[]{"v29", NULL};
const char *g_contained_v30[]{"v30", NULL};
const char *g_contained_v31[]{"v31", NULL};

const char *g_invalidate_v0[]{"v0", "d0", "s0", NULL};
const char *g_invalidate_v1[]{"v1", "d1", "s1", NULL};
const char *g_invalidate_v2[]{"v2", "d2", "s2", NULL};
const char *g_invalidate_v3[]{"v3", "d3", "s3", NULL};
const char *g_invalidate_v4[]{"v4", "d4", "s4", NULL};
const char *g_invalidate_v5[]{"v5", "d5", "s5", NULL};
const char *g_invalidate_v6[]{"v6", "d6", "s6", NULL};
const char *g_invalidate_v7[]{"v7", "d7", "s7", NULL};
const char *g_invalidate_v8[]{"v8", "d8", "s8", NULL};
const char *g_invalidate_v9[]{"v9", "d9", "s9", NULL};
const char *g_invalidate_v10[]{"v10", "d10", "s10", NULL};
const char *g_invalidate_v11[]{"v11", "d11", "s11", NULL};
const char *g_invalidate_v12[]{"v12", "d12", "s12", NULL};
const char *g_invalidate_v13[]{"v13", "d13", "s13", NULL};
const char *g_invalidate_v14[]{"v14", "d14", "s14", NULL};
const char *g_invalidate_v15[]{"v15", "d15", "s15", NULL};
const char *g_invalidate_v16[]{"v16", "d16", "s16", NULL};
const char *g_invalidate_v17[]{"v17", "d17", "s17", NULL};
const char *g_invalidate_v18[]{"v18", "d18", "s18", NULL};
const char *g_invalidate_v19[]{"v19", "d19", "s19", NULL};
const char *g_invalidate_v20[]{"v20", "d20", "s20", NULL};
const char *g_invalidate_v21[]{"v21", "d21", "s21", NULL};
const char *g_invalidate_v22[]{"v22", "d22", "s22", NULL};
const char *g_invalidate_v23[]{"v23", "d23", "s23", NULL};
const char *g_invalidate_v24[]{"v24", "d24", "s24", NULL};
const char *g_invalidate_v25[]{"v25", "d25", "s25", NULL};
const char *g_invalidate_v26[]{"v26", "d26", "s26", NULL};
const char *g_invalidate_v27[]{"v27", "d27", "s27", NULL};
const char *g_invalidate_v28[]{"v28", "d28", "s28", NULL};
const char *g_invalidate_v29[]{"v29", "d29", "s29", NULL};
const char *g_invalidate_v30[]{"v30", "d30", "s30", NULL};
const char *g_invalidate_v31[]{"v31", "d31", "s31", NULL};

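// Offset helpers used to advertise where each value lives inside
// DNBArchMachARM64::Context for the register info tables below. Each vN
// register occupies 16 bytes of the vfp state, so VFP_V_OFFSET_IDX(idx) is
// the byte offset of vN within the combined context (fpsr/fpcr are advertised
// at index 32, immediately past v31). When debugserver is not built for an
// arm64 host the NEON state is only visible as an opaque byte buffer, and the
// same 16-byte stride is applied to it.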
#if defined(__arm64__) || defined(__aarch64__)
#define VFP_V_OFFSET_IDX(idx) \
  (offsetof(DNBArchMachARM64::FPU, __v) + (idx * 16) + \
   offsetof(DNBArchMachARM64::Context, vfp))
#else
#define VFP_V_OFFSET_IDX(idx) \
  (offsetof(DNBArchMachARM64::FPU, opaque) + (idx * 16) + \
   offsetof(DNBArchMachARM64::Context, vfp))
#endif
#define VFP_OFFSET_NAME(reg) \
  (offsetof(DNBArchMachARM64::FPU, reg) + \
   offsetof(DNBArchMachARM64::Context, vfp))
#define EXC_OFFSET(reg) \
  (offsetof(DNBArchMachARM64::EXC, reg) + \
   offsetof(DNBArchMachARM64::Context, exc))

//#define FLOAT_FORMAT Float
#define DEFINE_VFP_V_IDX(idx) \
  { \
    e_regSetVFP, vfp_v##idx, "v" #idx, "q" #idx, Vector, VectorOfUInt8, 16, \
        VFP_V_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_v##idx, \
        INVALID_NUB_REGNUM, debugserver_vfp_v##idx, NULL, g_invalidate_v##idx \
  }
#define DEFINE_PSEUDO_VFP_S_IDX(idx) \
  { \
    e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, Float, 4, 0, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx \
  }
#define DEFINE_PSEUDO_VFP_D_IDX(idx) \
  { \
    e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, Float, 8, 0, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx \
  }

// Floating point registers
const DNBRegisterInfo DNBArchMachARM64::g_vfp_registers[] = {
    DEFINE_VFP_V_IDX(0),
    DEFINE_VFP_V_IDX(1),
    DEFINE_VFP_V_IDX(2),
    DEFINE_VFP_V_IDX(3),
    DEFINE_VFP_V_IDX(4),
    DEFINE_VFP_V_IDX(5),
    DEFINE_VFP_V_IDX(6),
    DEFINE_VFP_V_IDX(7),
    DEFINE_VFP_V_IDX(8),
    DEFINE_VFP_V_IDX(9),
    DEFINE_VFP_V_IDX(10),
    DEFINE_VFP_V_IDX(11),
    DEFINE_VFP_V_IDX(12),
    DEFINE_VFP_V_IDX(13),
    DEFINE_VFP_V_IDX(14),
    DEFINE_VFP_V_IDX(15),
    DEFINE_VFP_V_IDX(16),
    DEFINE_VFP_V_IDX(17),
    DEFINE_VFP_V_IDX(18),
    DEFINE_VFP_V_IDX(19),
    DEFINE_VFP_V_IDX(20),
    DEFINE_VFP_V_IDX(21),
    DEFINE_VFP_V_IDX(22),
    DEFINE_VFP_V_IDX(23),
    DEFINE_VFP_V_IDX(24),
    DEFINE_VFP_V_IDX(25),
    DEFINE_VFP_V_IDX(26),
    DEFINE_VFP_V_IDX(27),
    DEFINE_VFP_V_IDX(28),
    DEFINE_VFP_V_IDX(29),
    DEFINE_VFP_V_IDX(30),
    DEFINE_VFP_V_IDX(31),
    {e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    DEFINE_PSEUDO_VFP_S_IDX(0),
    DEFINE_PSEUDO_VFP_S_IDX(1),
    DEFINE_PSEUDO_VFP_S_IDX(2),
    DEFINE_PSEUDO_VFP_S_IDX(3),
    DEFINE_PSEUDO_VFP_S_IDX(4),
    DEFINE_PSEUDO_VFP_S_IDX(5),
    DEFINE_PSEUDO_VFP_S_IDX(6),
    DEFINE_PSEUDO_VFP_S_IDX(7),
    DEFINE_PSEUDO_VFP_S_IDX(8),
    DEFINE_PSEUDO_VFP_S_IDX(9),
    DEFINE_PSEUDO_VFP_S_IDX(10),
    DEFINE_PSEUDO_VFP_S_IDX(11),
    DEFINE_PSEUDO_VFP_S_IDX(12),
    DEFINE_PSEUDO_VFP_S_IDX(13),
    DEFINE_PSEUDO_VFP_S_IDX(14),
    DEFINE_PSEUDO_VFP_S_IDX(15),
    DEFINE_PSEUDO_VFP_S_IDX(16),
    DEFINE_PSEUDO_VFP_S_IDX(17),
    DEFINE_PSEUDO_VFP_S_IDX(18),
    DEFINE_PSEUDO_VFP_S_IDX(19),
    DEFINE_PSEUDO_VFP_S_IDX(20),
    DEFINE_PSEUDO_VFP_S_IDX(21),
    DEFINE_PSEUDO_VFP_S_IDX(22),
    DEFINE_PSEUDO_VFP_S_IDX(23),
    DEFINE_PSEUDO_VFP_S_IDX(24),
    DEFINE_PSEUDO_VFP_S_IDX(25),
    DEFINE_PSEUDO_VFP_S_IDX(26),
    DEFINE_PSEUDO_VFP_S_IDX(27),
    DEFINE_PSEUDO_VFP_S_IDX(28),
    DEFINE_PSEUDO_VFP_S_IDX(29),
    DEFINE_PSEUDO_VFP_S_IDX(30),
    DEFINE_PSEUDO_VFP_S_IDX(31),

    DEFINE_PSEUDO_VFP_D_IDX(0),
    DEFINE_PSEUDO_VFP_D_IDX(1),
    DEFINE_PSEUDO_VFP_D_IDX(2),
    DEFINE_PSEUDO_VFP_D_IDX(3),
    DEFINE_PSEUDO_VFP_D_IDX(4),
    DEFINE_PSEUDO_VFP_D_IDX(5),
    DEFINE_PSEUDO_VFP_D_IDX(6),
    DEFINE_PSEUDO_VFP_D_IDX(7),
    DEFINE_PSEUDO_VFP_D_IDX(8),
    DEFINE_PSEUDO_VFP_D_IDX(9),
    DEFINE_PSEUDO_VFP_D_IDX(10),
    DEFINE_PSEUDO_VFP_D_IDX(11),
    DEFINE_PSEUDO_VFP_D_IDX(12),
    DEFINE_PSEUDO_VFP_D_IDX(13),
    DEFINE_PSEUDO_VFP_D_IDX(14),
    DEFINE_PSEUDO_VFP_D_IDX(15),
    DEFINE_PSEUDO_VFP_D_IDX(16),
    DEFINE_PSEUDO_VFP_D_IDX(17),
    DEFINE_PSEUDO_VFP_D_IDX(18),
    DEFINE_PSEUDO_VFP_D_IDX(19),
    DEFINE_PSEUDO_VFP_D_IDX(20),
    DEFINE_PSEUDO_VFP_D_IDX(21),
    DEFINE_PSEUDO_VFP_D_IDX(22),
    DEFINE_PSEUDO_VFP_D_IDX(23),
    DEFINE_PSEUDO_VFP_D_IDX(24),
    DEFINE_PSEUDO_VFP_D_IDX(25),
    DEFINE_PSEUDO_VFP_D_IDX(26),
    DEFINE_PSEUDO_VFP_D_IDX(27),
    DEFINE_PSEUDO_VFP_D_IDX(28),
    DEFINE_PSEUDO_VFP_D_IDX(29),
    DEFINE_PSEUDO_VFP_D_IDX(30),
    DEFINE_PSEUDO_VFP_D_IDX(31)

};

//_STRUCT_ARM_EXCEPTION_STATE64
//{
//  uint64_t far;       /* Virtual Fault Address */
//  uint32_t esr;       /* Exception syndrome */
//  uint32_t exception; /* number of arm exception taken */
//};

// Exception registers
const DNBRegisterInfo DNBArchMachARM64::g_exc_registers[] = {
    {e_regSetEXC, exc_far, "far", NULL, Uint, Hex, 8, EXC_OFFSET(__far),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_esr, "esr", NULL, Uint, Hex, 4, EXC_OFFSET(__esr),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_exception, "exception", NULL, Uint, Hex, 4,
     EXC_OFFSET(__exception), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};

// Number of registers in each register set
const size_t DNBArchMachARM64::k_num_gpr_registers =
    sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_vfp_registers =
    sizeof(g_vfp_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_exc_registers =
    sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_all_registers =
    k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;

// Register set definitions. The first definition, at register set index
// zero, is for all registers, followed by the other register sets. The
// register information for the "all registers" set need not be filled in.
const DNBRegisterSetInfo DNBArchMachARM64::g_reg_sets[] = {
    {"ARM64 Registers", NULL, k_num_all_registers},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_vfp_registers, k_num_vfp_registers},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
// Total number of register sets for this architecture
const size_t DNBArchMachARM64::k_num_register_sets =
    sizeof(g_reg_sets) / sizeof(DNBRegisterSetInfo);

const DNBRegisterSetInfo *
DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  return g_reg_sets;
}

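// Map a (REGISTER_SET_GENERIC, GENERIC_REGNUM_*) pair onto the concrete GPR
// set/register indices used by this plug-in: pc, sp, fp, lr (return address),
// cpsr (flags), and the first six argument registers x0-x5. Non-generic
// register numbers are passed through unchanged; unknown generic numbers
// return false.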
bool DNBArchMachARM64::FixGenericRegisterNumber(uint32_t &set, uint32_t &reg) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_pc;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_sp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_fp;
      break;

    case GENERIC_REGNUM_RA: // Return Address
      set = e_regSetGPR;
      reg = gpr_lr;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_cpsr;
      break;

    case GENERIC_REGNUM_ARG1:
    case GENERIC_REGNUM_ARG2:
    case GENERIC_REGNUM_ARG3:
    case GENERIC_REGNUM_ARG4:
    case GENERIC_REGNUM_ARG5:
    case GENERIC_REGNUM_ARG6:
      set = e_regSetGPR;
      reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
      break;

    default:
      return false;
    }
  }
  return true;
}
bool DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg,
                                        DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    value->info = *regInfo;
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
#if defined(__LP64__)
        if (reg == gpr_pc)
          value->value.uint64 = arm_thread_state64_get_pc(m_state.context.gpr);
        else if (reg == gpr_lr)
          value->value.uint64 = arm_thread_state64_get_lr(m_state.context.gpr);
        else if (reg == gpr_sp)
          value->value.uint64 = arm_thread_state64_get_sp(m_state.context.gpr);
        else if (reg == gpr_fp)
          value->value.uint64 = arm_thread_state64_get_fp(m_state.context.gpr);
        else
          value->value.uint64 = m_state.context.gpr.__x[reg];
#else
        value->value.uint64 = m_state.context.gpr.__x[reg];
#endif
        return true;
      } else if (reg == gpr_cpsr) {
        value->value.uint32 = m_state.context.gpr.__cpsr;
        return true;
      }
      break;

    case e_regSetVFP:

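      // The sN and dN pseudo registers alias the low 4 and 8 bytes of the
      // corresponding 16-byte vN register, so all of them read from the same
      // slot of the vector state at a 16-byte stride.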
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0],
               16);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               16);
#endif
        return true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
#endif
        return true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
#endif
        return true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0],
               4);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               4);
#endif
        return true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0],
               8);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               8);
#endif
        return true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        value->value.uint64 = m_state.context.exc.__far;
        return true;
      } else if (reg == exc_esr) {
        value->value.uint32 = m_state.context.exc.__esr;
        return true;
      } else if (reg == exc_exception) {
        value->value.uint32 = m_state.context.exc.__exception;
        return true;
      }
      break;
    }
  }
  return false;
}

bool DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg,
                                        const DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  bool success = false;
  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
#if defined(__LP64__)
        uint64_t signed_value = value->value.uint64;
#if __has_feature(ptrauth_calls)
        // The incoming value could be garbage. Strip it to avoid
        // trapping when it gets resigned in the thread state.
        signed_value = (uint64_t)ptrauth_strip((void *)signed_value,
                                               ptrauth_key_function_pointer);
        signed_value = (uint64_t)ptrauth_sign_unauthenticated(
            (void *)signed_value, ptrauth_key_function_pointer, 0);
#endif
        if (reg == gpr_pc)
          arm_thread_state64_set_pc_fptr(m_state.context.gpr,
                                         (void *)signed_value);
        else if (reg == gpr_lr)
          arm_thread_state64_set_lr_fptr(m_state.context.gpr,
                                         (void *)signed_value);
        else if (reg == gpr_sp)
          arm_thread_state64_set_sp(m_state.context.gpr, value->value.uint64);
        else if (reg == gpr_fp)
          arm_thread_state64_set_fp(m_state.context.gpr, value->value.uint64);
        else
          m_state.context.gpr.__x[reg] = value->value.uint64;
#else
        m_state.context.gpr.__x[reg] = value->value.uint64;
#endif
        success = true;
      } else if (reg == gpr_cpsr) {
        m_state.context.gpr.__cpsr = value->value.uint32;
        success = true;
      }
      break;

    case e_regSetVFP:
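      // Writes mirror the reads above: sN and dN only update the low 4 or 8
      // bytes of the containing vN slot; the rest of the 16-byte value is
      // left untouched.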
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8,
               16);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               &value->value.v_uint8, 16);
#endif
        success = true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)m_state.context.vfp.opaque) + (32 * 16) + 4,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8,
               4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               &value->value.v_uint8, 4);
#endif
        success = true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8,
               8);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               &value->value.v_uint8, 8);
#endif
        success = true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        m_state.context.exc.__far = value->value.uint64;
        success = true;
      } else if (reg == exc_esr) {
        m_state.context.exc.__esr = value->value.uint32;
        success = true;
      } else if (reg == exc_exception) {
        m_state.context.exc.__exception = value->value.uint32;
        success = true;
      }
      break;
    }
  }
  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}
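// For e_regSetALL the individual kern_return_t results are OR'ed together.
// KERN_SUCCESS is 0, so the combined value is KERN_SUCCESS only if every
// register set was read (or written) successfully; a nonzero result signals
// that at least one of the calls failed, although it is not necessarily a
// single meaningful error code.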
kern_return_t DNBArchMachARM64::GetRegisterState(int set, bool force) {
  switch (set) {
  case e_regSetALL:
    return GetGPRState(force) | GetVFPState(force) | GetEXCState(force) |
           GetDBGState(force);
  case e_regSetGPR:
    return GetGPRState(force);
  case e_regSetVFP:
    return GetVFPState(force);
  case e_regSetEXC:
    return GetEXCState(force);
  case e_regSetDBG:
    return GetDBGState(force);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

kern_return_t DNBArchMachARM64::SetRegisterState(int set) {
  // Make sure we have a valid context to set.
  kern_return_t err = GetRegisterState(set, false);
  if (err != KERN_SUCCESS)
    return err;

  switch (set) {
  case e_regSetALL:
    return SetGPRState() | SetVFPState() | SetEXCState() | SetDBGState(false);
  case e_regSetGPR:
    return SetGPRState();
  case e_regSetVFP:
    return SetVFPState();
  case e_regSetEXC:
    return SetEXCState();
  case e_regSetDBG:
    return SetDBGState(false);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

bool DNBArchMachARM64::RegisterSetStateIsValid(int set) const {
  return m_state.RegsAreValid(set);
}

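// The buffer exchanged for the g/G packets is the GPR, VFP, and EXC states
// concatenated back to back; each struct is copied separately so any padding
// between the members of m_state.context never makes it into the packet.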
nub_size_t DNBArchMachARM64::GetRegisterContext(void *buf, nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf && buf_len) {
    if (size > buf_len)
      size = buf_len;

    bool force = false;
    if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
      return 0;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = (uint8_t *)buf;
    ::memcpy(p, &m_state.context.gpr, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(p, &m_state.context.vfp, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(p, &m_state.context.exc, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - (uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  // Return the size of the register context even if NULL was passed in
  return size;
}

nub_size_t DNBArchMachARM64::SetRegisterContext(const void *buf,
                                                nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf == NULL || buf_len == 0)
    size = 0;

  if (size) {
    if (size > buf_len)
      size = buf_len;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = (uint8_t *)buf;
    ::memcpy(&m_state.context.gpr, p, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(&m_state.context.vfp, p, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(&m_state.context.exc, p, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - (uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
    SetGPRState();
    SetVFPState();
    SetEXCState();
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  return size;
}

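// SaveRegisterState forces a fresh read of the GPR and VFP sets and then
// snapshots the thread state under a new save id; RestoreRegisterState writes
// only the GPR and VFP portions of that snapshot back. This pairing is
// presumably what callers use to preserve a thread's registers around
// temporarily running code in the inferior.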
uint32_t DNBArchMachARM64::SaveRegisterState() {
  kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
  DNBLogThreadedIf(
      LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
                  "(SetGPRState() for stop_count = %u)",
      m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());

  // Always re-read the registers because we just called thread_abort_safely()
  // above.
  bool force = true;

  if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: GPR regs failed to read: %u ",
                     kret);
  } else if ((kret = GetVFPState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: %s regs failed to read: %u",
                     "VFP", kret);
  } else {
    const uint32_t save_id = GetNextRegisterStateSaveID();
    m_saved_register_states[save_id] = m_state.context;
    return save_id;
  }
  return UINT32_MAX;
}

bool DNBArchMachARM64::RestoreRegisterState(uint32_t save_id) {
  SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
  if (pos != m_saved_register_states.end()) {
    m_state.context.gpr = pos->second.gpr;
    m_state.context.vfp = pos->second.vfp;
    kern_return_t kret;
    bool success = true;
    if ((kret = SetGPRState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
                                   "(save_id = %u) error: GPR regs failed to "
                                   "write: %u",
                       save_id, kret);
      success = false;
    } else if ((kret = SetVFPState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
                                   "(save_id = %u) error: %s regs failed to "
                                   "write: %u",
                       save_id, "VFP", kret);
      success = false;
    }
    m_saved_register_states.erase(pos);
    return success;
  }
  return false;
}

#endif // #if defined (ARM_THREAD_STATE64_COUNT)
#endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)