//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include "DNB.h"
#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

#include <cinttypes>
#include <sys/sysctl.h>

#include <memory>

MachThreadList::MachThreadList()
    : m_threads(), m_threads_mutex(), m_is_64_bit(false) {}

MachThreadList::~MachThreadList() = default;

nub_state_t MachThreadList::GetState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetState();
  return eStateInvalid;
}

const char *MachThreadList::GetName(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetName();
  return NULL;
}

ThreadInfo::QoS MachThreadList::GetRequestedQoS(nub_thread_t tid,
                                                nub_addr_t tsd,
                                                uint64_t dti_qos_class_index) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
  return ThreadInfo::QoS();
}

nub_addr_t MachThreadList::GetPThreadT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetPThreadT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetDispatchQueueT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetDispatchQueueT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetTSDAddressForThread(
    nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset,
    uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetTSDAddressForThread(
        plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset,
        plo_pthread_tsd_entry_size);
  return INVALID_NUB_ADDRESS;
}

nub_thread_t MachThreadList::SetCurrentThread(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp) {
    m_current_thread = thread_sp;
    return tid;
  }
  return INVALID_NUB_THREAD;
}

bool MachThreadList::GetThreadStoppedReason(
    nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetStopException().GetStopInfo(stop_info);
  return false;
}

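// Retrieve the kernel's thread_identifier_info for the given thread by
// translating its globally unique thread ID to a Mach port number and
// calling ::thread_info().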
bool MachThreadList::GetIdentifierInfo(
    nub_thread_t tid, thread_identifier_info_data_t *ident_info) {
  thread_t mach_port_number = GetMachPortNumberByThreadID(tid);

  mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
  return ::thread_info(mach_port_number, THREAD_IDENTIFIER_INFO,
                       (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void MachThreadList::DumpThreadStoppedReason(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    thread_sp->GetStopException().DumpStopReason();
}

const char *MachThreadList::GetThreadInfo(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetBasicInfoAsString();
  return NULL;
}

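// Look up a thread by its globally unique thread ID with a linear scan of
// the thread list (under the list mutex). Returns an empty shared pointer
// if no thread matches.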
MachThreadSP MachThreadList::GetThreadByID(nub_thread_t tid) const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber(thread_t mach_port_number) const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber(thread_t mach_port_number) const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      return m_threads[idx]->ThreadID();
    }
  }
  return INVALID_NUB_THREAD;
}

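// Translate a globally unique thread ID back into its Mach port number.
// Returns 0 (MACH_PORT_NULL) if the thread is not in our list.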
thread_t MachThreadList::GetMachPortNumberByThreadID(
    nub_thread_t globally_unique_id) const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == globally_unique_id) {
      return m_threads[idx]->MachPortNumber();
    }
  }
  return 0;
}

bool MachThreadList::GetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterValue(set, reg, reg_value);

  return false;
}

bool MachThreadList::SetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      const DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterValue(set, reg, reg_value);

  return false;
}

nub_size_t MachThreadList::GetRegisterContext(nub_thread_t tid, void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterContext(buf, buf_len);
  return 0;
}

nub_size_t MachThreadList::SetRegisterContext(nub_thread_t tid, const void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterContext(buf, buf_len);
  return 0;
}

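// Save the thread's current register state. The returned save ID can be
// handed back to RestoreRegisterState() later; 0 is returned if the thread
// is not found.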
uint32_t MachThreadList::SaveRegisterState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SaveRegisterState();
  return 0;
}

bool MachThreadList::RestoreRegisterState(nub_thread_t tid, uint32_t save_id) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->RestoreRegisterState(save_id);
  return false;
}

nub_size_t MachThreadList::NumThreads() const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  return m_threads.size();
}

nub_thread_t MachThreadList::ThreadIDAtIndex(nub_size_t idx) const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  if (idx < m_threads.size())
    return m_threads[idx]->ThreadID();
  return INVALID_NUB_THREAD;
}

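// Return the thread ID of the current thread, selecting a current thread
// first via CurrentThread() if one has not been chosen yet.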
nub_thread_t MachThreadList::CurrentThreadID() {
  MachThreadSP thread_sp;
  CurrentThread(thread_sp);
  if (thread_sp.get())
    return thread_sp->ThreadID();
  return INVALID_NUB_THREAD;
}

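// Deliver a Mach exception to the thread that raised it, identified by the
// exception's thread port. Returns false if no matching thread exists.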
bool MachThreadList::NotifyException(MachException::Data &exc) {
  MachThreadSP thread_sp(GetThreadByMachPortNumber(exc.thread_port));
  if (thread_sp) {
    thread_sp->NotifyException(exc);
    return true;
  }
  return false;
}

void MachThreadList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  m_threads.clear();
}

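// Reconcile our cached thread list with the threads that currently exist in
// the task. On the process's first stop this also detects whether the
// inferior is 64-bit and selects the matching architecture. When "update"
// is false, the list is only (re)built if it is empty. Newly discovered
// threads are appended to "new_threads" if that pointer is non-NULL; the
// return value is the resulting thread count.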
uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update,
                                 MachThreadList::collection *new_threads) {
  DNBLogThreadedIf(LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, "
                               "update = %u) process stop count = %u",
                   process->ProcessID(), update, process->StopCount());
  // The lock guard keeps the thread list mutex locked until it goes out of
  // scope.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);

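  // On the first stop, ask the kernel via sysctl() whether the process is
  // 64-bit so the correct architecture can be selected below.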
  if (process->StopCount() == 0) {
    int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID()};
    struct kinfo_proc processInfo;
    size_t bufsize = sizeof(processInfo);
    if (sysctl(mib, (unsigned)(sizeof(mib) / sizeof(int)), &processInfo,
               &bufsize, NULL, 0) == 0 &&
        bufsize > 0) {
      if (processInfo.kp_proc.p_flag & P_LP64)
        m_is_64_bit = true;
    }
#if defined(__i386__) || defined(__x86_64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
#elif defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
    else {
      if (process->GetCPUType() == CPU_TYPE_ARM64_32)
        DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64_32);
      else
        DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
    }
#endif
  }

  if (m_threads.empty() || update) {
    thread_array_t thread_list = NULL;
    mach_msg_type_number_t thread_list_count = 0;
    task_t task = process->Task().TaskPort();
    DNBError err(::task_threads(task, &thread_list, &thread_list_count),
                 DNBError::MachKernel);

    if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
      err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, "
                      "thread_list_count => %u )",
                      task, thread_list, thread_list_count);

    if (err.Status() == KERN_SUCCESS && thread_list_count > 0) {
      MachThreadList::collection currThreads;
      size_t idx;
      // Iterate through the current thread list and see which threads
      // we already have in our list (keep them), which ones we don't
      // (add them), and which ones are not around anymore (remove them).
      for (idx = 0; idx < thread_list_count; ++idx) {
        const thread_t mach_port_num = thread_list[idx];

        uint64_t unique_thread_id =
            MachThread::GetGloballyUniqueThreadIDForMachPortID(mach_port_num);
        MachThreadSP thread_sp(GetThreadByID(unique_thread_id));
        if (thread_sp) {
          // Keep the existing thread class.
          currThreads.push_back(thread_sp);
        } else {
          // We don't have this thread, so add it.
          thread_sp = std::make_shared<MachThread>(
              process, m_is_64_bit, unique_thread_id, mach_port_num);

          // Only add the new thread if it is ready to be displayed and shown
          // to users; otherwise it will be picked up on a later update.
          if (thread_sp->IsUserReady()) {
            if (new_threads)
              new_threads->push_back(thread_sp);

            currThreads.push_back(thread_sp);
          }
        }
      }

      m_threads.swap(currThreads);
      m_current_thread.reset();

      // Free the vm memory given to us by ::task_threads().
      vm_size_t thread_list_size =
          (vm_size_t)(thread_list_count * sizeof(thread_t));
      ::vm_deallocate(::mach_task_self(), (vm_address_t)thread_list,
                      thread_list_size);
    }
  }
  return static_cast<uint32_t>(m_threads.size());
}

void MachThreadList::CurrentThread(MachThreadSP &thread_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  if (m_current_thread.get() == NULL) {
    // Figure out which thread is going to be our current thread. This is
    // currently done by finding the first thread in the list that has a
    // valid exception.
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx]->GetStopException().IsValid()) {
        m_current_thread = m_threads[idx];
        break;
      }
    }
  }
  thread_sp = m_current_thread;
}

void MachThreadList::Dump() const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->Dump(idx);
  }
}

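// Prepare every thread for a resume. The thread list is refreshed first
// since libdispatch or the kernel can spawn threads while the task is
// suspended; newly discovered threads are resumed, or kept suspended when
// only a single thread was asked to run.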
void MachThreadList::ProcessWillResume(
    MachProcess *process, const DNBThreadResumeActions &thread_actions) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);

  // Update our thread list, because sometimes libdispatch or the kernel
  // will spawn threads while a task is suspended.
  MachThreadList::collection new_threads;

  // First figure out if we were planning on running only one thread, and if
  // so force that thread to resume.
  bool run_one_thread;
  nub_thread_t solo_thread = INVALID_NUB_THREAD;
  if (thread_actions.GetSize() > 0 &&
      thread_actions.NumActionsWithState(eStateStepping) +
              thread_actions.NumActionsWithState(eStateRunning) ==
          1) {
    run_one_thread = true;
    const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
    size_t num_actions = thread_actions.GetSize();
    for (size_t i = 0; i < num_actions; i++, action_ptr++) {
      if (action_ptr->state == eStateStepping ||
          action_ptr->state == eStateRunning) {
        solo_thread = action_ptr->tid;
        break;
      }
    }
  } else
    run_one_thread = false;

  UpdateThreadList(process, true, &new_threads);

  DNBThreadResumeAction resume_new_threads = {-1U, eStateRunning, 0,
                                              INVALID_NUB_ADDRESS};
  // If we are planning to run only one thread, any new threads should be
  // suspended.
  if (run_one_thread)
    resume_new_threads.state = eStateSuspended;

  const size_t num_new_threads = new_threads.size();
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    MachThread *thread = m_threads[idx].get();
    bool handled = false;
    for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx) {
      if (thread == new_threads[new_idx].get()) {
        thread->ThreadWillResume(&resume_new_threads);
        handled = true;
        break;
      }
    }

    if (!handled) {
      const DNBThreadResumeAction *thread_action =
          thread_actions.GetActionForThread(thread->ThreadID(), true);
      // There must always be a thread action for every thread.
      assert(thread_action);
      bool others_stopped = false;
      if (solo_thread == thread->ThreadID())
        others_stopped = true;
      thread->ThreadWillResume(thread_action, others_stopped);
    }
  }

  if (new_threads.size()) {
    for (uint32_t idx = 0; idx < num_new_threads; ++idx) {
      DNBLogThreadedIf(
          LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) "
                      "stop-id=%u, resuming newly discovered thread: "
                      "0x%8.8" PRIx64 ", thread-is-user-ready=%i)",
          process->ProcessID(), process->StopCount(),
          new_threads[idx]->ThreadID(), new_threads[idx]->IsUserReady());
    }
  }
}

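// Called after the process stops: refresh the thread list and give each
// thread a chance to update its stop state. Returns the thread count.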
uint32_t MachThreadList::ProcessDidStop(MachProcess *process) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  // Update our thread list.
  const uint32_t num_threads = UpdateThreadList(process, true);
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->ThreadDidStop();
  }
  return num_threads;
}

// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
bool MachThreadList::ShouldStop(bool &step_more) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  bool should_stop = false;
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx) {
    should_stop = m_threads[idx]->ShouldStop(step_more);
  }
  return should_stop;
}

void MachThreadList::NotifyBreakpointChanged(const DNBBreakpoint *bp) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->NotifyBreakpointChanged(bp);
  }
}

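// Apply a hardware breakpoint or watchpoint action to every thread as a
// single transaction: if any thread fails, the changes already made to the
// preceding threads are rolled back and INVALID_NUB_HW_INDEX is returned;
// otherwise the transaction is committed on all threads and the hardware
// index used is returned.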
uint32_t MachThreadList::DoHardwareBreakpointAction(
    const DNBBreakpoint *bp, HardwareBreakpointAction action) const {
  if (bp == NULL)
    return INVALID_NUB_HW_INDEX;

  uint32_t hw_index = INVALID_NUB_HW_INDEX;
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  const size_t num_threads = m_threads.size();
  // On Mac OS X we have to prime the control registers for new threads. We
  // do this using the control register data for the first thread, for lack
  // of a better way of choosing.
  bool also_set_on_task = true;
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    switch (action) {
    case HardwareBreakpointAction::EnableWatchpoint:
      hw_index = m_threads[idx]->EnableHardwareWatchpoint(bp, also_set_on_task);
      break;
    case HardwareBreakpointAction::DisableWatchpoint:
      hw_index =
          m_threads[idx]->DisableHardwareWatchpoint(bp, also_set_on_task);
      break;
    case HardwareBreakpointAction::EnableBreakpoint:
      hw_index = m_threads[idx]->EnableHardwareBreakpoint(bp, also_set_on_task);
      break;
    case HardwareBreakpointAction::DisableBreakpoint:
      hw_index =
          m_threads[idx]->DisableHardwareBreakpoint(bp, also_set_on_task);
      break;
    }
    if (hw_index == INVALID_NUB_HW_INDEX) {
      // We know that idx failed for some reason. Roll back the transaction
      // for threads [0, idx).
      for (uint32_t i = 0; i < idx; ++i)
        m_threads[i]->RollbackTransForHWP();
      return INVALID_NUB_HW_INDEX;
    }
    also_set_on_task = false;
  }
  // Notify each thread to commit the pending transaction.
  for (uint32_t idx = 0; idx < num_threads; ++idx)
    m_threads[idx]->FinishTransForHWP();
  return hw_index;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() ->
// MachProcess::EnableWatchpoint()
//   -> MachThreadList::EnableHardwareWatchpoint().
uint32_t
MachThreadList::EnableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  return DoHardwareBreakpointAction(wp,
                                    HardwareBreakpointAction::EnableWatchpoint);
}

bool MachThreadList::DisableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  return DoHardwareBreakpointAction(
             wp, HardwareBreakpointAction::DisableWatchpoint) !=
         INVALID_NUB_HW_INDEX;
}

uint32_t
MachThreadList::EnableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  return DoHardwareBreakpointAction(bp,
                                    HardwareBreakpointAction::EnableBreakpoint);
}

bool MachThreadList::DisableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  return DoHardwareBreakpointAction(
             bp, HardwareBreakpointAction::DisableBreakpoint) !=
         INVALID_NUB_HW_INDEX;
}

uint32_t MachThreadList::NumSupportedHardwareWatchpoints() const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  const size_t num_threads = m_threads.size();
  // Use an arbitrary thread to retrieve the number of supported hardware
  // watchpoints.
  if (num_threads)
    return m_threads[0]->NumSupportedHardwareWatchpoints();
  return 0;
}

uint32_t MachThreadList::GetThreadIndexForThreadStoppedWithSignal(
    const int signo) const {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetStopException().SoftSignal() == signo)
      return idx;
  }
  return UINT32_MAX;
}
