//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

MachThreadList::MachThreadList() :
    m_threads(),
    m_threads_mutex(PTHREAD_MUTEX_RECURSIVE)
{
}

MachThreadList::~MachThreadList()
{
}

nub_state_t
MachThreadList::GetState(nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetState();
    return eStateInvalid;
}

const char *
MachThreadList::GetName (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetName();
    return NULL;
}

nub_thread_t
MachThreadList::SetCurrentThread(nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
    {
        m_current_thread = thread_sp;
        return tid;
    }
    return INVALID_NUB_THREAD;
}


bool
MachThreadList::GetThreadStoppedReason(nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetStopException().GetStopInfo(stop_info);
    return false;
}

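// Query the kernel for the thread identifier info (unique thread ID, thread
// handle and dispatch queue address) of the given thread. If the thread ID
// can't be mapped to a valid Mach port number, the ::thread_info() call
// below will fail and this returns false.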
bool
MachThreadList::GetIdentifierInfo (nub_thread_t tid, thread_identifier_info_data_t *ident_info)
{
    thread_t mach_port_number = GetMachPortNumberByThreadID (tid);

    mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
    return ::thread_info (mach_port_number, THREAD_IDENTIFIER_INFO, (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void
MachThreadList::DumpThreadStoppedReason (nub_thread_t tid) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        thread_sp->GetStopException().DumpStopReason();
}

const char *
MachThreadList::GetThreadInfo (nub_thread_t tid) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetBasicInfoAsString();
    return NULL;
}

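// The lookup helpers below translate between the two ways a thread is
// identified: the globally unique thread ID (nub_thread_t), which stays
// stable for the lifetime of the thread, and the Mach port number
// (thread_t), which is what the kernel reports in exceptions and in the
// ::task_threads() list.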
MachThreadSP
MachThreadList::GetThreadByID (nub_thread_t tid) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->ThreadID() == tid)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber (thread_t mach_port_number) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->MachPortNumber() == mach_port_number)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber (thread_t mach_port_number) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->MachPortNumber() == mach_port_number)
        {
            return m_threads[idx]->ThreadID();
        }
    }
    return INVALID_NUB_THREAD;
}

thread_t
MachThreadList::GetMachPortNumberByThreadID (nub_thread_t globally_unique_id) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->ThreadID() == globally_unique_id)
        {
            return m_threads[idx]->MachPortNumber();
        }
    }
    return 0;
}

bool
MachThreadList::GetRegisterValue (nub_thread_t tid, uint32_t reg_set_idx, uint32_t reg_idx, DNBRegisterValue *reg_value) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRegisterValue(reg_set_idx, reg_idx, reg_value);

    return false;
}

bool
MachThreadList::SetRegisterValue (nub_thread_t tid, uint32_t reg_set_idx, uint32_t reg_idx, const DNBRegisterValue *reg_value) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SetRegisterValue(reg_set_idx, reg_idx, reg_value);

    return false;
}

nub_size_t
MachThreadList::GetRegisterContext (nub_thread_t tid, void *buf, size_t buf_len)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRegisterContext (buf, buf_len);
    return 0;
}

nub_size_t
MachThreadList::SetRegisterContext (nub_thread_t tid, const void *buf, size_t buf_len)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SetRegisterContext (buf, buf_len);
    return 0;
}

nub_size_t
MachThreadList::NumThreads () const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    return m_threads.size();
}

nub_thread_t
MachThreadList::ThreadIDAtIndex (nub_size_t idx) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    if (idx < m_threads.size())
        return m_threads[idx]->ThreadID();
    return INVALID_NUB_THREAD;
}

nub_thread_t
MachThreadList::CurrentThreadID ()
{
    MachThreadSP thread_sp;
    CurrentThread(thread_sp);
    if (thread_sp.get())
        return thread_sp->ThreadID();
    return INVALID_NUB_THREAD;
}

bool
MachThreadList::NotifyException(MachException::Data& exc)
{
    MachThreadSP thread_sp (GetThreadByMachPortNumber (exc.thread_port));
    if (thread_sp)
    {
        thread_sp->NotifyException(exc);
        return true;
    }
    return false;
}

void
MachThreadList::Clear()
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    m_threads.clear();
}

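//----------------------------------------------------------------------
// Update the cached thread list from the kernel's current view of the
// task (when the list is empty or "update" is true).
//
// The Mach port list is fetched with ::task_threads() and reconciled
// against m_threads: threads we already know about are kept, newly
// discovered threads are appended (and reported through "new_threads"
// when a collection is supplied), and threads that have exited simply
// drop out of the new list. On x86 the very first stop also checks
// whether the inferior is 64 bit and sets the architecture accordingly.
// The buffer returned by ::task_threads() is vm_deallocate()'d before
// returning.
//
// RETURNS
//    the number of threads in the updated list
//----------------------------------------------------------------------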
uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update, MachThreadList::collection *new_threads)
{
    // locker will keep a mutex locked until it goes out of scope
    DNBLogThreadedIf (LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, update = %u) process stop count = %u", process->ProcessID(), update, process->StopCount());
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);

#if defined (__i386__) || defined (__x86_64__)
    if (process->StopCount() == 0)
    {
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID() };
        struct kinfo_proc processInfo;
        size_t bufsize = sizeof(processInfo);
        bool is_64_bit = false;
        if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, 0) == 0 && bufsize > 0)
        {
            if (processInfo.kp_proc.p_flag & P_LP64)
                is_64_bit = true;
        }
        if (is_64_bit)
            DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
        else
            DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
    }
#endif

    if (m_threads.empty() || update)
    {
        thread_array_t thread_list = NULL;
        mach_msg_type_number_t thread_list_count = 0;
        task_t task = process->Task().TaskPort();
        DNBError err(::task_threads (task, &thread_list, &thread_list_count), DNBError::MachKernel);

        if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
            err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, thread_list_count => %u )", task, thread_list, thread_list_count);

        if (err.Error() == KERN_SUCCESS && thread_list_count > 0)
        {
            MachThreadList::collection currThreads;
            size_t idx;
            // Iterate through the current thread list and see which threads
            // we already have in our list (keep them), which ones we don't
            // (add them), and which ones are not around anymore (remove them).
            for (idx = 0; idx < thread_list_count; ++idx)
            {
                const thread_t mach_port_num = thread_list[idx];

                uint64_t unique_thread_id = MachThread::GetGloballyUniqueThreadIDForMachPortID (mach_port_num);
                MachThreadSP thread_sp (GetThreadByID (unique_thread_id));
                if (thread_sp)
                {
                    // Keep the existing thread class
                    currThreads.push_back(thread_sp);
                }
                else
                {
                    // We don't have this thread, let's add it.
                    thread_sp.reset(new MachThread(process, unique_thread_id, mach_port_num));

                    // Only add the new thread if it is user ready: make sure
                    // the thread is ready to be displayed and shown to users
                    // before we add it to our list...
                    if (thread_sp->IsUserReady())
                    {
                        if (new_threads)
                            new_threads->push_back(thread_sp);

                        currThreads.push_back(thread_sp);
                    }
                }
            }

            m_threads.swap(currThreads);
            m_current_thread.reset();

            // Free the vm memory given to us by ::task_threads()
            vm_size_t thread_list_size = (vm_size_t) (thread_list_count * sizeof (thread_t));
            ::vm_deallocate (::mach_task_self(),
                             (vm_address_t)thread_list,
                             thread_list_size);
        }
    }
    return m_threads.size();
}


void
MachThreadList::CurrentThread (MachThreadSP& thread_sp)
{
    // locker will keep a mutex locked until it goes out of scope
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    if (m_current_thread.get() == NULL)
    {
        // Figure out which thread is going to be our current thread.
        // This is currently done by finding the first thread in the list
        // that has a valid exception.
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (m_threads[idx]->GetStopException().IsValid())
            {
                m_current_thread = m_threads[idx];
                break;
            }
        }
    }
    thread_sp = m_current_thread;
}

void
MachThreadList::Dump() const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->Dump(idx);
    }
}


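//----------------------------------------------------------------------
// Let each thread know the process is about to resume.
//
// The thread list is refreshed first because libdispatch or the kernel
// can spawn threads while the task is suspended. Threads discovered by
// that refresh get a default action: eStateRunning normally, or
// eStateSuspended when the resume plan runs exactly one thread. All
// other threads receive the action recorded for them in thread_actions,
// and the solo thread (if any) is told that the other threads are
// stopped.
//----------------------------------------------------------------------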
void
MachThreadList::ProcessWillResume(MachProcess *process, const DNBThreadResumeActions &thread_actions)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);

    // Update our thread list, because sometimes libdispatch or the kernel
    // will spawn threads while a task is suspended.
    MachThreadList::collection new_threads;

    // First figure out if we were planning on running only one thread, and if so force that thread to resume.
    bool run_one_thread;
    nub_thread_t solo_thread = INVALID_NUB_THREAD;
    if (thread_actions.GetSize() > 0
        && thread_actions.NumActionsWithState(eStateStepping) + thread_actions.NumActionsWithState (eStateRunning) == 1)
    {
        run_one_thread = true;
        const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
        size_t num_actions = thread_actions.GetSize();
        for (size_t i = 0; i < num_actions; i++, action_ptr++)
        {
            if (action_ptr->state == eStateStepping || action_ptr->state == eStateRunning)
            {
                solo_thread = action_ptr->tid;
                break;
            }
        }
    }
    else
        run_one_thread = false;

    UpdateThreadList(process, true, &new_threads);

    DNBThreadResumeAction resume_new_threads = { -1U, eStateRunning, 0, INVALID_NUB_ADDRESS };
    // If we are planning to run only one thread, any new threads should be suspended.
    if (run_one_thread)
        resume_new_threads.state = eStateSuspended;

    const uint32_t num_new_threads = new_threads.size();
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        MachThread *thread = m_threads[idx].get();
        bool handled = false;
        for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx)
        {
            if (thread == new_threads[new_idx].get())
            {
                thread->ThreadWillResume(&resume_new_threads);
                handled = true;
                break;
            }
        }

        if (!handled)
        {
            const DNBThreadResumeAction *thread_action = thread_actions.GetActionForThread (thread->ThreadID(), true);
            // There must always be a thread action for every thread.
            assert (thread_action);
            bool others_stopped = false;
            if (solo_thread == thread->ThreadID())
                others_stopped = true;
            thread->ThreadWillResume (thread_action, others_stopped);
        }
    }

    if (new_threads.size())
    {
        for (uint32_t idx = 0; idx < num_new_threads; ++idx)
        {
            DNBLogThreadedIf (LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) stop-id=%u, resuming newly discovered thread: 0x%8.8" PRIx64 ", thread-is-user-ready=%i)",
                              process->ProcessID(),
                              process->StopCount(),
                              new_threads[idx]->ThreadID(),
                              new_threads[idx]->IsUserReady());
        }
    }
}

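//----------------------------------------------------------------------
// Let each thread know the process has just stopped: refresh the thread
// list and call ThreadDidStop() on every thread so it can update its
// state now that the process is stopped.
//
// RETURNS
//    the number of threads in the updated list
//----------------------------------------------------------------------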
uint32_t
MachThreadList::ProcessDidStop(MachProcess *process)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    // Update our thread list
    const uint32_t num_threads = UpdateThreadList(process, true);
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->ThreadDidStop();
    }
    return num_threads;
}

//----------------------------------------------------------------------
// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
//----------------------------------------------------------------------
bool
MachThreadList::ShouldStop(bool &step_more)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    uint32_t should_stop = false;
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx)
    {
        should_stop = m_threads[idx]->ShouldStop(step_more);
    }
    return should_stop;
}


void
MachThreadList::NotifyBreakpointChanged (const DNBBreakpoint *bp)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->NotifyBreakpointChanged(bp);
    }
}


uint32_t
MachThreadList::EnableHardwareBreakpoint (const DNBBreakpoint* bp) const
{
    if (bp != NULL)
    {
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->EnableHardwareBreakpoint(bp);
    }
    return INVALID_NUB_HW_INDEX;
}

bool
MachThreadList::DisableHardwareBreakpoint (const DNBBreakpoint* bp) const
{
    if (bp != NULL)
    {
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->DisableHardwareBreakpoint(bp);
    }
    return false;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() -> MachProcess::EnableWatchpoint()
//  -> MachThreadList::EnableHardwareWatchpoint().
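//
// Hardware watchpoints are set on every thread as a transaction: if any
// thread fails to accept the watchpoint, the change is rolled back on the
// threads that already succeeded and INVALID_NUB_HW_INDEX is returned;
// otherwise each thread commits the pending change.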
uint32_t
MachThreadList::EnableHardwareWatchpoint (const DNBBreakpoint* wp) const
{
    uint32_t hw_index = INVALID_NUB_HW_INDEX;
    if (wp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const uint32_t num_threads = m_threads.size();
        // On Mac OS X we have to prime the control registers for new threads. We do this
        // using the control register data for the first thread, for lack of a better way of choosing.
        bool also_set_on_task = true;
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(wp, also_set_on_task)) == INVALID_NUB_HW_INDEX)
            {
                // We know that idx failed for some reason. Let's roll back the transaction for [0, idx).
                for (uint32_t i = 0; i < idx; ++i)
                    m_threads[i]->RollbackTransForHWP();
                return INVALID_NUB_HW_INDEX;
            }
            also_set_on_task = false;
        }
        // Notify each thread to commit the pending transaction.
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->FinishTransForHWP();

    }
    return hw_index;
}

bool
MachThreadList::DisableHardwareWatchpoint (const DNBBreakpoint* wp) const
{
    if (wp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const uint32_t num_threads = m_threads.size();

        // On Mac OS X we have to prime the control registers for new threads. We do this
        // using the control register data for the first thread, for lack of a better way of choosing.
        bool also_set_on_task = true;
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task))
            {
                // We know that idx failed for some reason. Let's roll back the transaction for [0, idx).
                for (uint32_t i = 0; i < idx; ++i)
                    m_threads[i]->RollbackTransForHWP();
                return false;
            }
            also_set_on_task = false;
        }
        // Notify each thread to commit the pending transaction.
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->FinishTransForHWP();

        return true;
    }
    return false;
}

uint32_t
MachThreadList::NumSupportedHardwareWatchpoints () const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    // Use an arbitrary thread to retrieve the number of supported hardware watchpoints.
    if (num_threads)
        return m_threads[0]->NumSupportedHardwareWatchpoints();
    return 0;
}

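// Return the index of the first thread whose stop reason is the soft signal
// "signo", or UINT32_MAX if no thread stopped with that signal.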
uint32_t
MachThreadList::GetThreadIndexForThreadStoppedWithSignal (const int signo) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    uint32_t should_stop = false;
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx)
    {
        if (m_threads[idx]->GetStopException().SoftSignal () == signo)
            return idx;
    }
    return UINT32_MAX;
}