//===-- ThreadList.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <cstdlib>

#include <algorithm>

#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadList.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/State.h"

using namespace lldb;
using namespace lldb_private;

ThreadList::ThreadList(Process *process)
    : ThreadCollection(), m_process(process), m_stop_id(0),
      m_selected_tid(LLDB_INVALID_THREAD_ID) {}

ThreadList::ThreadList(const ThreadList &rhs)
    : ThreadCollection(), m_process(rhs.m_process), m_stop_id(rhs.m_stop_id),
      m_selected_tid() {
  // Use the assignment operator since it uses the mutex
  *this = rhs;
}

const ThreadList &ThreadList::operator=(const ThreadList &rhs) {
  if (this != &rhs) {
    // Lock both mutexes to make sure neither side changes its threads out
    // from under us while the assignment occurs.
    std::lock(GetMutex(), rhs.GetMutex());
    std::lock_guard<std::recursive_mutex> guard(GetMutex(), std::adopt_lock);
    std::lock_guard<std::recursive_mutex> rhs_guard(rhs.GetMutex(),
                                                    std::adopt_lock);

    m_process = rhs.m_process;
    m_stop_id = rhs.m_stop_id;
    m_threads = rhs.m_threads;
    m_selected_tid = rhs.m_selected_tid;
  }
  return *this;
}

ThreadList::~ThreadList() {
  // Clear the thread list. Clear will take the mutex lock which will ensure
  // that if anyone is using the list they won't get it removed while using it.
  Clear();
}

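// The expression TID stack records which thread should be used for expression
// evaluation: while the stack is non-empty its top entry overrides the
// selected thread, and we fall back to the selected thread if the stack is
// empty or its top thread has gone away. The push/pop calls below are expected
// to be strictly paired, e.g.:
//
//   PushExpressionExecutionThread(tid);  // tid becomes the expression thread
//   // ... run the expression ...
//   PopExpressionExecutionThread(tid);   // must pop the same tid (asserted)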
lldb::ThreadSP ThreadList::GetExpressionExecutionThread() {
  if (m_expression_tid_stack.empty())
    return GetSelectedThread();
  ThreadSP expr_thread_sp = FindThreadByID(m_expression_tid_stack.back());
  if (expr_thread_sp)
    return expr_thread_sp;
  else
    return GetSelectedThread();
}

void ThreadList::PushExpressionExecutionThread(lldb::tid_t tid) {
  m_expression_tid_stack.push_back(tid);
}

void ThreadList::PopExpressionExecutionThread(lldb::tid_t tid) {
  assert(m_expression_tid_stack.back() == tid);
  m_expression_tid_stack.pop_back();
}

uint32_t ThreadList::GetStopID() const { return m_stop_id; }

void ThreadList::SetStopID(uint32_t stop_id) { m_stop_id = stop_id; }

uint32_t ThreadList::GetSize(bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();
  return m_threads.size();
}

ThreadSP ThreadList::GetThreadAtIndex(uint32_t idx, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  if (idx < m_threads.size())
    thread_sp = m_threads[idx];
  return thread_sp;
}

ThreadSP ThreadList::FindThreadByID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::FindThreadByProtocolID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetProtocolID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::RemoveThreadByID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetID() == tid) {
      thread_sp = m_threads[idx];
      m_threads.erase(m_threads.begin() + idx);
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::RemoveThreadByProtocolID(lldb::tid_t tid,
                                              bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetProtocolID() == tid) {
      thread_sp = m_threads[idx];
      m_threads.erase(m_threads.begin() + idx);
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::GetThreadSPForThreadPtr(Thread *thread_ptr) {
  ThreadSP thread_sp;
  if (thread_ptr) {
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    uint32_t idx = 0;
    const uint32_t num_threads = m_threads.size();
    for (idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx].get() == thread_ptr) {
        thread_sp = m_threads[idx];
        break;
      }
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::GetBackingThread(const ThreadSP &real_thread) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  ThreadSP thread_sp;
  const uint32_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetBackingThread() == real_thread) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::FindThreadByIndexID(uint32_t index_id, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  const uint32_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetIndexID() == index_id) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

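// Decide whether this stop event should become a public stop. To avoid holding
// the ThreadList lock while thread plans run, the work is done on a copy of
// the list: first every thread's StopInfo is computed, then each thread is
// asked ShouldStop, and finally the answers are reconciled (an interrupt event
// forces a stop, a thread that still has private work to do suppresses the
// stop, and an initial stop where no thread reports a reason is surfaced to
// the user as well).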
bool ThreadList::ShouldStop(Event *event_ptr) {
  // Running events should never stop, obviously...

  Log *log = GetLog(LLDBLog::Step);

  // The ShouldStop method of the threads can do a whole lot of work, figuring
  // out whether the thread plan conditions are met. So we don't want to keep
  // the ThreadList locked the whole time we are doing this.
  // FIXME: It is possible that running code could cause new threads
  // to be created. If that happens, we will miss asking them whether they
  // should stop. This is not a big deal since we haven't had a chance to hang
  // any interesting operations on those threads yet.

  collection threads_copy;
  {
    // Scope for locker
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    m_process->UpdateThreadListIfNeeded();
    for (lldb::ThreadSP thread_sp : m_threads) {
      // This is an optimization... If we didn't let a thread run in between
      // the previous stop and this one, we shouldn't have to consult it for
      // ShouldStop. So just leave it off the list we are going to inspect.
      // If the thread didn't run but had work to do before declaring a public
      // stop, then also include it.
      // On Linux, if a thread-specific conditional breakpoint was hit, it
      // won't necessarily be the thread that hit the breakpoint itself that
      // evaluates the conditional expression, so the thread that hit the
      // breakpoint could still be asked to stop, even though it hasn't been
      // allowed to run since the previous stop.
      if (thread_sp->GetTemporaryResumeState() != eStateSuspended ||
          thread_sp->IsStillAtLastBreakpointHit() ||
          thread_sp->ShouldRunBeforePublicStop())
        threads_copy.push_back(thread_sp);
    }

    // It is possible that the threads we were allowing to run all exited, and
    // then maybe the user interrupted or something; in that case, fall back
    // on looking at all threads:

    if (threads_copy.size() == 0)
      threads_copy = m_threads;
  }

  collection::iterator pos, end = threads_copy.end();

  if (log) {
    log->PutCString("");
    LLDB_LOGF(log,
              "ThreadList::%s: %" PRIu64 " threads, %" PRIu64
              " unsuspended threads",
              __FUNCTION__, (uint64_t)m_threads.size(),
              (uint64_t)threads_copy.size());
  }

  bool did_anybody_stop_for_a_reason = false;

  // If the event is an Interrupt event, then we're going to stop no matter
  // what. Otherwise, presume we won't stop.
  bool should_stop = false;
  if (Process::ProcessEventData::GetInterruptedFromEvent(event_ptr)) {
    LLDB_LOGF(
        log,
        "ThreadList::%s handling interrupt event, should stop set to true",
        __FUNCTION__);

    should_stop = true;
  }

  // Now we run through all the threads and get their stop infos. We want to
  // make sure to do this first before we start running the ShouldStop, because
  // one thread's ShouldStop could destroy information (like deleting a thread
  // specific breakpoint another thread had stopped at) which could lead us to
  // compute the StopInfo incorrectly. We don't need to use it here, we just
  // want to make sure it gets computed.

  for (pos = threads_copy.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    thread_sp->GetStopInfo();
  }

  // If a thread needs to finish some job that can be done just on this thread
  // before broadcasting the stop, it will signal that by returning true for
  // ShouldRunBeforePublicStop. This variable gathers the results from that.
  bool a_thread_needs_to_run = false;
  for (pos = threads_copy.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);

    // We should never get a stop for which no thread had a stop reason, but
    // sometimes we do see this - for instance when we first connect to a
    // remote stub. In that case we should stop, since we can't figure out the
    // right thing to do and stopping gives the user control over what to do in
    // this instance.
    //
    // Note, this causes a problem when you have a thread specific breakpoint,
    // and a bunch of threads hit the breakpoint, but not the thread which we
    // are waiting for. All the threads that are not "supposed" to hit the
    // breakpoint are marked as having no stop reason, which is right, they
    // should not show a stop reason. But that triggers this code and causes
    // us to stop seemingly for no reason.
    //
    // Since the only way we ever saw this error was on first attach, I'm only
    // going to check whether anybody actually stopped for a reason on the
    // first stop; on later stops we just assume somebody did.
    //
    // If this becomes a problem, we'll have to have another StopReason like
    // "StopInfoHidden" which will look invalid everywhere but at this check.

    if (thread_sp->GetProcess()->GetStopID() > 1)
      did_anybody_stop_for_a_reason = true;
    else
      did_anybody_stop_for_a_reason |= thread_sp->ThreadStoppedForAReason();

    const bool thread_should_stop = thread_sp->ShouldStop(event_ptr);

    if (thread_should_stop)
      should_stop |= true;
    else {
      bool this_thread_forces_run = thread_sp->ShouldRunBeforePublicStop();
      a_thread_needs_to_run |= this_thread_forces_run;
      if (this_thread_forces_run)
        LLDB_LOG(log,
                 "ThreadList::{0} thread: {1:x}, "
                 "says it needs to run before public stop.",
                 __FUNCTION__, thread_sp->GetID());
    }
  }

  if (a_thread_needs_to_run) {
    should_stop = false;
  } else if (!should_stop && !did_anybody_stop_for_a_reason) {
    should_stop = true;
    LLDB_LOGF(log,
              "ThreadList::%s we stopped but no threads had a stop reason, "
              "overriding should_stop and stopping.",
              __FUNCTION__);
  }

  LLDB_LOGF(log, "ThreadList::%s overall should_stop = %i", __FUNCTION__,
            should_stop);

  if (should_stop) {
    for (pos = threads_copy.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      thread_sp->WillStop();
    }
  }

  return should_stop;
}

Vote ThreadList::ShouldReportStop(Event *event_ptr) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  Vote result = eVoteNoOpinion;
  m_process->UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();

  Log *log = GetLog(LLDBLog::Step);

  LLDB_LOGF(log, "ThreadList::%s %" PRIu64 " threads", __FUNCTION__,
            (uint64_t)m_threads.size());

  // Run through the threads and ask whether we should report this event. For
  // stopping, a YES vote wins over everything. A NO vote wins over NO
  // opinion. The exception is if a thread has work it needs to force before
  // a public stop, which overrides everyone else's opinion:
  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    if (thread_sp->ShouldRunBeforePublicStop()) {
      LLDB_LOG(log,
               "Thread {0:x} has private business to complete, overrode "
               "the should report stop.",
               thread_sp->GetID());
      result = eVoteNo;
      break;
    }

    const Vote vote = thread_sp->ShouldReportStop(event_ptr);
    switch (vote) {
    case eVoteNoOpinion:
      continue;

    case eVoteYes:
      result = eVoteYes;
      break;

    case eVoteNo:
      if (result == eVoteNoOpinion) {
        result = eVoteNo;
      } else {
        LLDB_LOG(log,
                 "Thread {0:x} voted {1}, but lost out because result was {2}",
                 thread_sp->GetID(), vote, result);
      }
      break;
    }
  }
  LLDB_LOG(log, "Returning {0}", result);
  return result;
}

void ThreadList::SetShouldReportStop(Vote vote) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  m_process->UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    thread_sp->SetShouldReportStop(vote);
  }
}

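// As with ShouldReportStop, poll the threads for a vote, but note the opposite
// precedence here: a NO vote beats a YES vote, and threads that will stay
// suspended across the resume do not get a vote at all.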
Vote ThreadList::ShouldReportRun(Event *event_ptr) {

  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  Vote result = eVoteNoOpinion;
  m_process->UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();

  // Run through the threads and ask whether we should report this event. The
  // rule is NO vote wins over everything, a YES vote wins over no opinion.

  Log *log = GetLog(LLDBLog::Step);

  for (pos = m_threads.begin(); pos != end; ++pos) {
    if ((*pos)->GetResumeState() != eStateSuspended) {
      switch ((*pos)->ShouldReportRun(event_ptr)) {
      case eVoteNoOpinion:
        continue;
      case eVoteYes:
        if (result == eVoteNoOpinion)
          result = eVoteYes;
        break;
      case eVoteNo:
        LLDB_LOGF(log,
                  "ThreadList::ShouldReportRun() thread %d (0x%4.4" PRIx64
                  ") says don't report.",
                  (*pos)->GetIndexID(), (*pos)->GetID());
        result = eVoteNo;
        break;
      }
    }
  }
  return result;
}

void ThreadList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  m_stop_id = 0;
  m_threads.clear();
  m_selected_tid = LLDB_INVALID_THREAD_ID;
}

void ThreadList::Destroy() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  const uint32_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->DestroyThread();
  }
}

void ThreadList::RefreshStateAfterStop() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  m_process->UpdateThreadListIfNeeded();

  Log *log = GetLog(LLDBLog::Step);
  if (log && log->GetVerbose())
    LLDB_LOGF(log,
              "Turning off notification of new threads while single stepping "
              "a thread.");

  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->RefreshStateAfterStop();
}

void ThreadList::DiscardThreadPlans() {
  // You don't need to update the thread list here, because only threads that
  // you currently know about have any thread plans.
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->DiscardThreadPlans(true);
}

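// Prepare every thread for the upcoming resume and negotiate which threads
// actually get to run. Returns false if the process doesn't actually need to
// resume, for instance when every runnable thread can satisfy its plan without
// moving the PC.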
bool ThreadList::WillResume() {
  // Run through the threads and perform their momentary actions. But we only
  // do this for threads that are running; user suspended threads stay where
  // they are.

  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  m_process->UpdateThreadListIfNeeded();

  collection::iterator pos, end = m_threads.end();

  // See if any thread wants to run stopping others. If it does, then we won't
  // setup the other threads for resume, since they aren't going to get a
  // chance to run. This is necessary because the SetupForResume might add
  // "StopOthers" plans which would then get to be part of the who-gets-to-run
  // negotiation, but they're coming in after the fact, and the threads that
  // are already set up should take priority.

  bool wants_solo_run = false;

  for (pos = m_threads.begin(); pos != end; ++pos) {
    lldbassert((*pos)->GetCurrentPlan() &&
               "thread should not have null thread plan");
    if ((*pos)->GetResumeState() != eStateSuspended &&
        (*pos)->GetCurrentPlan()->StopOthers()) {
      if ((*pos)->IsOperatingSystemPluginThread() &&
          !(*pos)->GetBackingThread())
        continue;
      wants_solo_run = true;
      break;
    }
  }

  if (wants_solo_run) {
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(log, "Turning on notification of new threads while single "
                     "stepping a thread.");
    m_process->StartNoticingNewThreads();
  } else {
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(log, "Turning off notification of new threads while single "
                     "stepping a thread.");
    m_process->StopNoticingNewThreads();
  }

  // Give all the threads that are likely to run a last chance to set up their
  // state before we negotiate who is actually going to get a chance to run...
  // Don't set to resume suspended threads, and if any thread wanted to stop
  // others, only call setup on the threads that request StopOthers...

  for (pos = m_threads.begin(); pos != end; ++pos) {
    if ((*pos)->GetResumeState() != eStateSuspended &&
        (!wants_solo_run || (*pos)->GetCurrentPlan()->StopOthers())) {
      if ((*pos)->IsOperatingSystemPluginThread() &&
          !(*pos)->GetBackingThread())
        continue;
      (*pos)->SetupForResume();
    }
  }

  // Now go through the threads and see if any thread wants to run just itself.
  // If so, pick one and run it.

  ThreadList run_me_only_list(m_process);

  run_me_only_list.SetStopID(m_process->GetStopID());

  // One or more threads might want to "Stop Others". We want to handle all
  // those requests first. And if there is a thread that wanted to "resume
  // before a public stop", let it get the first crack:
  // There are two special kinds of thread that have priority for "StopOthers":
  // a "ShouldRunBeforePublicStop" thread, or the currently selected thread. If
  // we find one satisfying that criterion, put it here.
  ThreadSP stop_others_thread_sp;

  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    if (thread_sp->GetResumeState() != eStateSuspended &&
        thread_sp->GetCurrentPlan()->StopOthers()) {
      if ((*pos)->IsOperatingSystemPluginThread() &&
          !(*pos)->GetBackingThread())
        continue;

      // You can't say "stop others" and also want yourself to be suspended.
      assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);
      run_me_only_list.AddThread(thread_sp);

      if (thread_sp == GetSelectedThread())
        stop_others_thread_sp = thread_sp;

      if (thread_sp->ShouldRunBeforePublicStop()) {
        // This takes precedence, so if we find one of these, service it:
        stop_others_thread_sp = thread_sp;
        break;
      }
    }
  }

  bool need_to_resume = true;

  if (run_me_only_list.GetSize(false) == 0) {
    // Everybody runs as they wish:
    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      StateType run_state;
      if (thread_sp->GetResumeState() != eStateSuspended)
        run_state = thread_sp->GetCurrentPlan()->RunState();
      else
        run_state = eStateSuspended;
      if (!thread_sp->ShouldResume(run_state))
        need_to_resume = false;
    }
  } else {
    ThreadSP thread_to_run;

    if (stop_others_thread_sp) {
      thread_to_run = stop_others_thread_sp;
    } else if (run_me_only_list.GetSize(false) == 1) {
      thread_to_run = run_me_only_list.GetThreadAtIndex(0);
    } else {
      int random_thread =
          (int)((run_me_only_list.GetSize(false) * (double)rand()) /
                (RAND_MAX + 1.0));
      thread_to_run = run_me_only_list.GetThreadAtIndex(random_thread);
    }

    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      if (thread_sp == thread_to_run) {
        // Note, a thread might be able to fulfill its plan w/o actually
        // resuming. An example of this is a step that changes the current
        // inlined function depth w/o moving the PC. Check that here:
        if (!thread_sp->ShouldResume(thread_sp->GetCurrentPlan()->RunState()))
          need_to_resume = false;
      } else
        thread_sp->ShouldResume(eStateSuspended);
    }
  }

  return need_to_resume;
}

void ThreadList::DidResume() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    // Don't clear out threads that aren't going to get a chance to run, rather
    // leave their state for the next time around.
    ThreadSP thread_sp(*pos);
    if (thread_sp->GetTemporaryResumeState() != eStateSuspended)
      thread_sp->DidResume();
  }
}

void ThreadList::DidStop() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    // Notify threads that the process just stopped. Note, this currently
    // assumes that all threads in the list stop when the process stops. In
    // the future we will want to support a debugging model where some threads
    // continue to run while others are stopped. We either need to handle that
    // somehow here or create a special thread list containing only threads
    // which will stop in the code that calls this method (currently
    // Process::SetPrivateState).
    ThreadSP thread_sp(*pos);
    if (StateIsRunningState(thread_sp->GetState()))
      thread_sp->DidStop();
  }
}

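// Return the currently selected thread. If the selected TID no longer names a
// live thread, fall back to the first thread in the list and make it the new
// selection.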
ThreadSP ThreadList::GetSelectedThread() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP thread_sp = FindThreadByID(m_selected_tid);
  if (!thread_sp.get()) {
    if (m_threads.size() == 0)
      return thread_sp;
    m_selected_tid = m_threads[0]->GetID();
    thread_sp = m_threads[0];
  }
  return thread_sp;
}

bool ThreadList::SetSelectedThreadByID(lldb::tid_t tid, bool notify) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP selected_thread_sp(FindThreadByID(tid));
  if (selected_thread_sp) {
    m_selected_tid = tid;
    selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
  } else
    m_selected_tid = LLDB_INVALID_THREAD_ID;

  if (notify)
    NotifySelectedThreadChanged(m_selected_tid);

  return m_selected_tid != LLDB_INVALID_THREAD_ID;
}

bool ThreadList::SetSelectedThreadByIndexID(uint32_t index_id, bool notify) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP selected_thread_sp(FindThreadByIndexID(index_id));
  if (selected_thread_sp.get()) {
    m_selected_tid = selected_thread_sp->GetID();
    selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
  } else
    m_selected_tid = LLDB_INVALID_THREAD_ID;

  if (notify)
    NotifySelectedThreadChanged(m_selected_tid);

  return m_selected_tid != LLDB_INVALID_THREAD_ID;
}

void ThreadList::NotifySelectedThreadChanged(lldb::tid_t tid) {
  ThreadSP selected_thread_sp(FindThreadByID(tid));
  if (selected_thread_sp->EventTypeHasListeners(
          Thread::eBroadcastBitThreadSelected)) {
    auto data_sp =
        std::make_shared<Thread::ThreadEventData>(selected_thread_sp);
    selected_thread_sp->BroadcastEvent(Thread::eBroadcastBitThreadSelected,
                                       data_sp);
  }
}

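// Swap in the contents of rhs (a newly built thread list) and then invalidate
// any threads from the old list that are no longer present, so that stale
// shared pointers don't keep a fully functional dead thread alive.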
void ThreadList::Update(ThreadList &rhs) {
  if (this != &rhs) {
    // Lock both mutexes to make sure neither side changes its threads out
    // from under us while the assignment occurs.
    std::scoped_lock<std::recursive_mutex, std::recursive_mutex> guard(
        GetMutex(), rhs.GetMutex());

    m_process = rhs.m_process;
    m_stop_id = rhs.m_stop_id;
    m_threads.swap(rhs.m_threads);
    m_selected_tid = rhs.m_selected_tid;

    // Now we look for threads that we are done with and make sure to clear
    // them up as much as possible so anyone with a shared pointer will still
    // have a reference, but the thread won't be of much use. Using
    // std::weak_ptr for all backward references (such as a thread to a
    // process) will eventually solve this issue for us, but for now, we need
    // to work around the issue.
    collection::iterator rhs_pos, rhs_end = rhs.m_threads.end();
    for (rhs_pos = rhs.m_threads.begin(); rhs_pos != rhs_end; ++rhs_pos) {
      // If this thread has already been destroyed, we don't need to look for
      // it to destroy it again.
      if (!(*rhs_pos)->IsValid())
        continue;

      const lldb::tid_t tid = (*rhs_pos)->GetID();
      bool thread_is_alive = false;
      const uint32_t num_threads = m_threads.size();
      for (uint32_t idx = 0; idx < num_threads; ++idx) {
        ThreadSP backing_thread = m_threads[idx]->GetBackingThread();
        if (m_threads[idx]->GetID() == tid ||
            (backing_thread && backing_thread->GetID() == tid)) {
          thread_is_alive = true;
          break;
        }
      }
      if (!thread_is_alive) {
        (*rhs_pos)->DestroyThread();
      }
    }
  }
}

void ThreadList::Flush() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->Flush();
}

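// The thread list does not own its lock: it shares the process's thread
// mutex, so holding the ThreadList lock and holding the Process's thread lock
// are the same thing.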
std::recursive_mutex &ThreadList::GetMutex() const {
  return m_process->m_thread_mutex;
}

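// RAII helper: the push done here is paired with a pop in the destructor
// (declared with the class, not shown in this file). A minimal usage sketch,
// assuming a valid thread_sp is already in hand:
//
//   {
//     ThreadList::ExpressionExecutionThreadPusher pusher(thread_sp);
//     // ... evaluate an expression; GetExpressionExecutionThread() should
//     // now prefer thread_sp ...
//   } // destruction pops thread_sp's TID off the expression stack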
ThreadList::ExpressionExecutionThreadPusher::ExpressionExecutionThreadPusher(
    lldb::ThreadSP thread_sp)
    : m_thread_list(nullptr), m_tid(LLDB_INVALID_THREAD_ID) {
  if (thread_sp) {
    m_tid = thread_sp->GetID();
    m_thread_list = &thread_sp->GetProcess()->GetThreadList();
    m_thread_list->PushExpressionExecutionThread(m_tid);
  }
}