//===-- ThreadList.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <cstdlib>

#include <algorithm>

#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadList.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/State.h"

using namespace lldb;
using namespace lldb_private;

ThreadList::ThreadList(Process &process)
    : ThreadCollection(), m_process(process), m_stop_id(0),
      m_selected_tid(LLDB_INVALID_THREAD_ID) {}

ThreadList::ThreadList(const ThreadList &rhs)
    : ThreadCollection(), m_process(rhs.m_process), m_stop_id(rhs.m_stop_id),
      m_selected_tid() {
  // Use the assignment operator since it uses the mutex
  *this = rhs;
}

const ThreadList &ThreadList::operator=(const ThreadList &rhs) {
  if (this != &rhs) {
    // We only allow assignments between thread lists describing the same
    // process. Same process implies same mutex, which means it's enough to
    // lock just the current object.
    assert(&m_process == &rhs.m_process);
    assert(&GetMutex() == &rhs.GetMutex());
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    m_stop_id = rhs.m_stop_id;
    m_threads = rhs.m_threads;
    m_selected_tid = rhs.m_selected_tid;
  }
  return *this;
}

ThreadList::~ThreadList() {
  // Clear the thread list. Clear will take the mutex lock which will ensure
  // that if anyone is using the list they won't get it removed while using it.
  Clear();
}

lldb::ThreadSP ThreadList::GetExpressionExecutionThread() {
  if (m_expression_tid_stack.empty())
    return GetSelectedThread();
  ThreadSP expr_thread_sp = FindThreadByID(m_expression_tid_stack.back());
  if (expr_thread_sp)
    return expr_thread_sp;
  else
    return GetSelectedThread();
}

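// Push/PopExpressionExecutionThread are expected to be called in balanced
// pairs; callers normally do so through the ExpressionExecutionThreadPusher
// RAII helper (see its constructor at the end of this file) rather than
// calling them directly.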
void ThreadList::PushExpressionExecutionThread(lldb::tid_t tid) {
  m_expression_tid_stack.push_back(tid);
}

void ThreadList::PopExpressionExecutionThread(lldb::tid_t tid) {
  assert(m_expression_tid_stack.back() == tid);
  m_expression_tid_stack.pop_back();
}

uint32_t ThreadList::GetStopID() const { return m_stop_id; }

void ThreadList::SetStopID(uint32_t stop_id) { m_stop_id = stop_id; }

uint32_t ThreadList::GetSize(bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process.UpdateThreadListIfNeeded();
  return m_threads.size();
}

ThreadSP ThreadList::GetThreadAtIndex(uint32_t idx, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process.UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  if (idx < m_threads.size())
    thread_sp = m_threads[idx];
  return thread_sp;
}

ThreadSP ThreadList::FindThreadByID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process.UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::FindThreadByProtocolID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process.UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetProtocolID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::RemoveThreadByID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process.UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetID() == tid) {
      thread_sp = m_threads[idx];
      m_threads.erase(m_threads.begin() + idx);
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::RemoveThreadByProtocolID(lldb::tid_t tid,
                                              bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process.UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetProtocolID() == tid) {
      thread_sp = m_threads[idx];
      m_threads.erase(m_threads.begin() + idx);
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::GetThreadSPForThreadPtr(Thread *thread_ptr) {
  ThreadSP thread_sp;
  if (thread_ptr) {
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    uint32_t idx = 0;
    const uint32_t num_threads = m_threads.size();
    for (idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx].get() == thread_ptr) {
        thread_sp = m_threads[idx];
        break;
      }
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::FindThreadByIndexID(uint32_t index_id, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process.UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  const uint32_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetIndexID() == index_id) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

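// Decide whether this stop event should be surfaced as a stop: snapshot the
// threads that could have caused it, precompute each thread's StopInfo, ask
// each thread's plans whether to stop, and let a thread that still has
// private work to do (ShouldRunBeforePublicStop) suppress the public stop.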
bool ThreadList::ShouldStop(Event *event_ptr) {
  // Running events should never stop, obviously...

  Log *log = GetLog(LLDBLog::Step);

  // The ShouldStop method of the threads can do a whole lot of work, figuring
  // out whether the thread plan conditions are met. So we don't want to keep
  // the ThreadList locked the whole time we are doing this.
  // FIXME: It is possible that running code could cause new threads
  // to be created. If that happens, we will miss asking them whether they
  // should stop. This is not a big deal since we haven't had a chance to hang
  // any interesting operations on those threads yet.

  collection threads_copy;
  {
    // Scope for locker
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    m_process.UpdateThreadListIfNeeded();
    for (lldb::ThreadSP thread_sp : m_threads) {
      // This is an optimization... If we didn't let a thread run in between
      // the previous stop and this one, we shouldn't have to consult it for
      // ShouldStop. So just leave it off the list we are going to inspect.
      // If the thread didn't run but had work to do before declaring a public
      // stop, then also include it.
      // On Linux, if a thread-specific conditional breakpoint was hit, it
      // won't necessarily be the thread that hit the breakpoint itself that
      // evaluates the conditional expression, so the thread that hit the
      // breakpoint could still be asked to stop, even though it hasn't been
      // allowed to run since the previous stop.
      if (thread_sp->GetTemporaryResumeState() != eStateSuspended ||
          thread_sp->IsStillAtLastBreakpointHit() ||
          thread_sp->ShouldRunBeforePublicStop())
        threads_copy.push_back(thread_sp);
    }

    // It is possible the threads we were allowing to run all exited and then
    // maybe the user interrupted or something, then fall back on looking at
    // all threads:

    if (threads_copy.size() == 0)
      threads_copy = m_threads;
  }

  collection::iterator pos, end = threads_copy.end();

  if (log) {
    log->PutCString("");
    LLDB_LOGF(log,
              "ThreadList::%s: %" PRIu64 " threads, %" PRIu64
              " unsuspended threads",
              __FUNCTION__, (uint64_t)m_threads.size(),
              (uint64_t)threads_copy.size());
  }

  bool did_anybody_stop_for_a_reason = false;

  // If the event is an Interrupt event, then we're going to stop no matter
  // what. Otherwise, presume we won't stop.
  bool should_stop = false;
  if (Process::ProcessEventData::GetInterruptedFromEvent(event_ptr)) {
    LLDB_LOGF(
        log,
        "ThreadList::%s handling interrupt event, should stop set to true",
        __FUNCTION__);

    should_stop = true;
  }

  // Now we run through all the threads and get their stop info's. We want to
  // make sure to do this first before we start running the ShouldStop,
  // because one thread's ShouldStop could destroy information (like deleting
  // a thread specific breakpoint another thread had stopped at) which could
  // lead us to compute the StopInfo incorrectly. We don't need to use it
  // here, we just want to make sure it gets computed.

  for (pos = threads_copy.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    thread_sp->GetStopInfo();
  }

  // If a thread needs to finish some job that can be done just on this thread
  // before broadcasting the stop, it will signal that by returning true for
  // ShouldRunBeforePublicStop. This variable gathers the results from that.
  bool a_thread_needs_to_run = false;
  for (pos = threads_copy.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);

    // We should never get a stop for which no thread had a stop reason, but
    // sometimes we do see this - for instance when we first connect to a
    // remote stub. In that case we should stop, since we can't figure out the
    // right thing to do and stopping gives the user control over what to do
    // in this instance.
    //
    // Note, this causes a problem when you have a thread specific breakpoint,
    // and a bunch of threads hit the breakpoint, but not the thread which we
    // are waiting for. All the threads that are not "supposed" to hit the
    // breakpoint are marked as having no stop reason, which is right, they
    // should not show a stop reason. But that triggers this code and causes
    // us to stop seemingly for no reason.
    //
    // Since the only way we ever saw this error was on first attach, I'm only
    // going to do that check on the very first stop; on later stops,
    // did_anybody_stop_for_a_reason is simply set to true.
    //
    // If this becomes a problem, we'll have to have another StopReason like
    // "StopInfoHidden" which will look invalid everywhere but at this check.

    if (thread_sp->GetProcess()->GetStopID() > 1)
      did_anybody_stop_for_a_reason = true;
    else
      did_anybody_stop_for_a_reason |= thread_sp->ThreadStoppedForAReason();

    const bool thread_should_stop = thread_sp->ShouldStop(event_ptr);

    if (thread_should_stop)
      should_stop |= true;
    else {
      bool this_thread_forces_run = thread_sp->ShouldRunBeforePublicStop();
      a_thread_needs_to_run |= this_thread_forces_run;
      if (this_thread_forces_run)
        LLDB_LOG(log,
                 "ThreadList::{0} thread: {1:x}, "
                 "says it needs to run before public stop.",
                 __FUNCTION__, thread_sp->GetID());
    }
  }

  if (a_thread_needs_to_run) {
    should_stop = false;
  } else if (!should_stop && !did_anybody_stop_for_a_reason) {
    should_stop = true;
    LLDB_LOGF(log,
              "ThreadList::%s we stopped but no threads had a stop reason, "
              "overriding should_stop and stopping.",
              __FUNCTION__);
  }

  LLDB_LOGF(log, "ThreadList::%s overall should_stop = %i", __FUNCTION__,
            should_stop);

  if (should_stop) {
    for (pos = threads_copy.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      thread_sp->WillStop();
    }
  }

  return should_stop;
}

Vote ThreadList::ShouldReportStop(Event *event_ptr) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  Vote result = eVoteNoOpinion;
  m_process.UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();

  Log *log = GetLog(LLDBLog::Step);

  LLDB_LOGF(log, "ThreadList::%s %" PRIu64 " threads", __FUNCTION__,
            (uint64_t)m_threads.size());

  // Run through the threads and ask whether we should report this event. For
  // stopping, a YES vote wins over everything. A NO vote wins over NO
  // opinion. The exception is if a thread has work it needs to force before
  // a public stop, which overrides everyone else's opinion:
  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    if (thread_sp->ShouldRunBeforePublicStop()) {
      LLDB_LOG(log,
               "Thread {0:x} has private business to complete, overrode "
               "the should report stop.",
               thread_sp->GetID());
      result = eVoteNo;
      break;
    }

    const Vote vote = thread_sp->ShouldReportStop(event_ptr);
    switch (vote) {
    case eVoteNoOpinion:
      continue;

    case eVoteYes:
      result = eVoteYes;
      break;

    case eVoteNo:
      if (result == eVoteNoOpinion) {
        result = eVoteNo;
      } else {
        LLDB_LOG(log,
                 "Thread {0:x} voted {1}, but lost out because result was {2}",
                 thread_sp->GetID(), vote, result);
      }
      break;
    }
  }
  LLDB_LOG(log, "Returning {0}", result);
  return result;
}

void ThreadList::SetShouldReportStop(Vote vote) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  m_process.UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    thread_sp->SetShouldReportStop(vote);
  }
}

Vote ThreadList::ShouldReportRun(Event *event_ptr) {

  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  Vote result = eVoteNoOpinion;
  m_process.UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();

  // Run through the threads and ask whether we should report this event. The
  // rule is NO vote wins over everything, a YES vote wins over no opinion.

  Log *log = GetLog(LLDBLog::Step);

  for (pos = m_threads.begin(); pos != end; ++pos) {
    if ((*pos)->GetResumeState() != eStateSuspended) {
      switch ((*pos)->ShouldReportRun(event_ptr)) {
      case eVoteNoOpinion:
        continue;
      case eVoteYes:
        if (result == eVoteNoOpinion)
          result = eVoteYes;
        break;
      case eVoteNo:
        LLDB_LOGF(log,
                  "ThreadList::ShouldReportRun() thread %d (0x%4.4" PRIx64
                  ") says don't report.",
                  (*pos)->GetIndexID(), (*pos)->GetID());
        result = eVoteNo;
        break;
      }
    }
  }
  return result;
}

void ThreadList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  m_stop_id = 0;
  m_threads.clear();
  m_selected_tid = LLDB_INVALID_THREAD_ID;
}

void ThreadList::Destroy() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  const uint32_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->DestroyThread();
  }
}

void ThreadList::RefreshStateAfterStop() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  m_process.UpdateThreadListIfNeeded();

  Log *log = GetLog(LLDBLog::Step);
  if (log && log->GetVerbose())
    LLDB_LOGF(log,
              "Turning off notification of new threads while single stepping "
              "a thread.");

  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->RefreshStateAfterStop();
}

void ThreadList::DiscardThreadPlans() {
  // You don't need to update the thread list here, because only threads that
  // you currently know about have any thread plans.
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->DiscardThreadPlans(true);
}

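// Negotiate which threads actually get to run on the next resume. If any
// runnable thread's current plan wants to "stop others", a single thread is
// chosen to run (preferring a thread that must run before the public stop,
// then the selected thread) and the rest are suspended; otherwise every
// non-suspended thread resumes according to its plan's RunState.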
bool ThreadList::WillResume(RunDirection &direction) {
  // Run through the threads and perform their momentary actions. But we only
  // do this for threads that are running, user suspended threads stay where
  // they are.

  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  m_process.UpdateThreadListIfNeeded();

  collection::iterator pos, end = m_threads.end();

  // Go through the threads and see if any thread wants to run just itself.
  // If so, then pick one and run it.

  ThreadList run_me_only_list(m_process);

  run_me_only_list.SetStopID(m_process.GetStopID());

  // One or more threads might want to "Stop Others". We want to handle all
  // those requests first. And if there is a thread that wanted to "resume
  // before a public stop", let it get the first crack:
  // There are two special kinds of thread that have priority for "StopOthers":
  // a "ShouldRunBeforePublicStop" thread, or the currently selected thread.
  // If we find one satisfying that criterion, put it here.
  ThreadSP thread_to_run;
  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    if (thread_sp->GetResumeState() != eStateSuspended &&
        thread_sp->GetCurrentPlan()->StopOthers()) {
      if (thread_sp->IsOperatingSystemPluginThread() &&
          !thread_sp->GetBackingThread())
        continue;

      // You can't say "stop others" and also want yourself to be suspended.
      assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);
      run_me_only_list.AddThread(thread_sp);

      if (thread_sp == GetSelectedThread())
        thread_to_run = thread_sp;

      if (thread_sp->ShouldRunBeforePublicStop()) {
        // This takes precedence, so if we find one of these, service it:
        thread_to_run = thread_sp;
        break;
      }
    }
  }

  if (run_me_only_list.GetSize(false) > 0 && !thread_to_run) {
    if (run_me_only_list.GetSize(false) == 1) {
      thread_to_run = run_me_only_list.GetThreadAtIndex(0);
    } else {
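      // More than one thread wants to run alone; pick one of them at random
      // so we don't systematically favor the same thread every time.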
      int random_thread =
          (int)((run_me_only_list.GetSize(false) * (double)rand()) /
                (RAND_MAX + 1.0));
      thread_to_run = run_me_only_list.GetThreadAtIndex(random_thread);
    }
  }

  if (thread_to_run != nullptr) {
    direction = thread_to_run->GetCurrentPlan()->GetDirection();
  } else {
    direction = m_process.GetBaseDirection();
  }

  // Give all the threads that are likely to run a last chance to set up their
  // state before we negotiate who is actually going to get a chance to run...
  // Don't set to resume suspended threads, and if any thread wanted to stop
  // others, only call setup on the threads that request StopOthers...
  if (thread_to_run != nullptr) {
    // See if any thread wants to run stopping others. If it does, then we
    // won't setup the other threads for resume, since they aren't going to
    // get a chance to run. This is necessary because the SetupForResume
    // might add "StopOthers" plans which would then get to be part of the
    // who-gets-to-run negotiation, but they're coming in after the fact, and
    // the threads that are already set up should take priority.
    if (thread_to_run->SetupToStepOverBreakpointIfNeeded(direction)) {
      // We only need to step over breakpoints when running forward, and the
      // step-over-breakpoint plan itself wants to run forward, so this
      // keeps our desired direction.
      assert(thread_to_run->GetCurrentPlan()->GetDirection() == direction);
    }
  } else {
    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      if (thread_sp->GetResumeState() != eStateSuspended) {
        if (thread_sp->IsOperatingSystemPluginThread() &&
            !thread_sp->GetBackingThread())
          continue;
        if (thread_sp->SetupToStepOverBreakpointIfNeeded(direction)) {
          // We only need to step over breakpoints when running forward, and
          // the step-over-breakpoint plan itself wants to run forward, so
          // this keeps our desired direction.
          assert(thread_sp->GetCurrentPlan()->GetDirection() == direction);
          // You can't say "stop others" and also want yourself to be
          // suspended.
          assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);
          thread_to_run = thread_sp;
          if (thread_sp->ShouldRunBeforePublicStop()) {
            // This takes precedence, so if we find one of these, service it:
            break;
          }
        }
      }
    }
  }

  if (thread_to_run != nullptr) {
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(log, "Turning on notification of new threads while single "
                     "stepping a thread.");
    m_process.StartNoticingNewThreads();
  } else {
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(log, "Turning off notification of new threads while single "
                     "stepping a thread.");
    m_process.StopNoticingNewThreads();
  }

  bool need_to_resume = true;

  if (thread_to_run == nullptr) {
    // Everybody runs as they wish:
    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      StateType run_state;
      if (thread_sp->GetResumeState() != eStateSuspended)
        run_state = thread_sp->GetCurrentPlan()->RunState();
      else
        run_state = eStateSuspended;
      if (!thread_sp->ShouldResume(run_state))
        need_to_resume = false;
    }
    if (need_to_resume) {
      // Ensure all threads are running in the right direction
      for (pos = m_threads.begin(); pos != end; ++pos) {
        ThreadSP thread_sp(*pos);
        while (thread_sp->GetCurrentPlan()->GetDirection() != direction) {
          // This can't discard the base plan because its direction is
          // m_process.GetBaseDirection() i.e. `direction`.
          thread_sp->DiscardPlan();
        }
      }
    }
  } else {
    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      if (thread_sp == thread_to_run) {
        // Note, a thread might be able to fulfill its plan w/o actually
        // resuming. An example of this is a step that changes the current
        // inlined function depth w/o moving the PC. Check that here:
        if (!thread_sp->ShouldResume(thread_sp->GetCurrentPlan()->RunState()))
          need_to_resume = false;
      } else
        thread_sp->ShouldResume(eStateSuspended);
    }
  }

  return need_to_resume;
}

void ThreadList::DidResume() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    // Don't clear out threads that aren't going to get a chance to run,
    // rather leave their state for the next time around.
    ThreadSP thread_sp(*pos);
    if (thread_sp->GetTemporaryResumeState() != eStateSuspended)
      thread_sp->DidResume();
  }
}

void ThreadList::DidStop() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    // Notify threads that the process just stopped. Note, this currently
    // assumes that all threads in the list stop when the process stops. In
    // the future we will want to support a debugging model where some
    // threads continue to run while others are stopped. We either need to
    // handle that somehow here or create a special thread list containing
    // only threads which will stop in the code that calls this method
    // (currently Process::SetPrivateState).
    ThreadSP thread_sp(*pos);
    if (StateIsRunningState(thread_sp->GetState()))
      thread_sp->DidStop();
  }
}

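// Return the currently selected thread. If the cached selected TID no longer
// names a live thread, fall back to the first thread in the list and make it
// the new selection.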
ThreadSP ThreadList::GetSelectedThread() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP thread_sp = FindThreadByID(m_selected_tid);
  if (!thread_sp.get()) {
    if (m_threads.size() == 0)
      return thread_sp;
    m_selected_tid = m_threads[0]->GetID();
    thread_sp = m_threads[0];
  }
  return thread_sp;
}

bool ThreadList::SetSelectedThreadByID(lldb::tid_t tid, bool notify) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP selected_thread_sp(FindThreadByID(tid));
  if (selected_thread_sp) {
    m_selected_tid = tid;
    selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
  } else
    m_selected_tid = LLDB_INVALID_THREAD_ID;

  if (notify)
    NotifySelectedThreadChanged(m_selected_tid);

  return m_selected_tid != LLDB_INVALID_THREAD_ID;
}

bool ThreadList::SetSelectedThreadByIndexID(uint32_t index_id, bool notify) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP selected_thread_sp(FindThreadByIndexID(index_id));
  if (selected_thread_sp.get()) {
    m_selected_tid = selected_thread_sp->GetID();
    selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
  } else
    m_selected_tid = LLDB_INVALID_THREAD_ID;

  if (notify)
    NotifySelectedThreadChanged(m_selected_tid);

  return m_selected_tid != LLDB_INVALID_THREAD_ID;
}

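// Broadcast a "thread selected" event on the newly selected thread, but only
// if somebody is actually listening for that event type.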
void ThreadList::NotifySelectedThreadChanged(lldb::tid_t tid) {
  ThreadSP selected_thread_sp(FindThreadByID(tid));
  if (selected_thread_sp->EventTypeHasListeners(
          Thread::eBroadcastBitThreadSelected)) {
    auto data_sp =
        std::make_shared<Thread::ThreadEventData>(selected_thread_sp);
    selected_thread_sp->BroadcastEvent(Thread::eBroadcastBitThreadSelected,
                                       data_sp);
  }
}

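// Take over the contents of rhs (which typically holds the freshly computed
// thread list), then destroy any of our old threads that are no longer
// present so that stale shared pointers held elsewhere become inert.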
void ThreadList::Update(ThreadList &rhs) {
  if (this != &rhs) {
    // We only allow assignments between thread lists describing the same
    // process. Same process implies same mutex, which means it's enough to
    // lock just the current object.
    assert(&m_process == &rhs.m_process);
    assert(&GetMutex() == &rhs.GetMutex());
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    m_stop_id = rhs.m_stop_id;
    m_threads.swap(rhs.m_threads);
    m_selected_tid = rhs.m_selected_tid;

    // Now we look for threads that we are done with and make sure to clear
    // them up as much as possible so anyone with a shared pointer will still
    // have a reference, but the thread won't be of much use. Using
    // std::weak_ptr for all backward references (such as a thread to a
    // process) will eventually solve this issue for us, but for now, we need
    // to work around the issue.
    collection::iterator rhs_pos, rhs_end = rhs.m_threads.end();
    for (rhs_pos = rhs.m_threads.begin(); rhs_pos != rhs_end; ++rhs_pos) {
      // If this thread has already been destroyed, we don't need to look for
      // it to destroy it again.
      if (!(*rhs_pos)->IsValid())
        continue;

      const lldb::tid_t tid = (*rhs_pos)->GetID();
      bool thread_is_alive = false;
      const uint32_t num_threads = m_threads.size();
      for (uint32_t idx = 0; idx < num_threads; ++idx) {
        ThreadSP backing_thread = m_threads[idx]->GetBackingThread();
        if (m_threads[idx]->GetID() == tid ||
            (backing_thread && backing_thread->GetID() == tid)) {
          thread_is_alive = true;
          break;
        }
      }
      if (!thread_is_alive) {
        (*rhs_pos)->DestroyThread();
      }
    }
  }
}

void ThreadList::Flush() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->Flush();
}

std::recursive_mutex &ThreadList::GetMutex() const {
  return m_process.m_thread_mutex;
}

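// RAII helper that makes the given thread the expression execution thread for
// the duration of a scope. A minimal usage sketch (the surrounding code and
// the thread_sp variable are hypothetical):
//
//   {
//     ThreadList::ExpressionExecutionThreadPusher pusher(thread_sp);
//     // ... evaluate the expression on thread_sp ...
//   } // destructor pops thread_sp's TID off the expression thread stack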
ThreadList::ExpressionExecutionThreadPusher::ExpressionExecutionThreadPusher(
    lldb::ThreadSP thread_sp)
    : m_thread_list(nullptr), m_tid(LLDB_INVALID_THREAD_ID) {
  if (thread_sp) {
    m_tid = thread_sp->GetID();
    m_thread_list = &thread_sp->GetProcess()->GetThreadList();
    m_thread_list->PushExpressionExecutionThread(m_tid);
  }
}