//===-- ThreadPlanStack.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/ThreadPlanStack.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Utility/Log.h"

using namespace lldb;
using namespace lldb_private;

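// Helper to print one plan as "Element <idx>: <description>", indented one
// level below the enclosing stack header.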
static void PrintPlanElement(Stream &s, const ThreadPlanSP &plan,
                             lldb::DescriptionLevel desc_level,
                             int32_t elem_idx) {
  s.IndentMore();
  s.Indent();
  s.Printf("Element %d: ", elem_idx);
  plan->GetDescription(&s, desc_level);
  s.EOL();
  s.IndentLess();
}

ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
  if (make_null) {
    // The ThreadPlanNull doesn't do anything to the Thread, so this is
    // actually still a const operation.
    m_plans.push_back(
        ThreadPlanSP(new ThreadPlanNull(const_cast<Thread &>(thread))));
  }
}

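// Dump all three plan stacks (active, completed, discarded) under a single
// reader lock, so the output reflects one consistent snapshot.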
void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                      lldb::DescriptionLevel desc_level,
                                      bool include_internal) const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  s.IndentMore();
  PrintOneStackNoLock(s, "Active plan stack", m_plans, desc_level,
                      include_internal);
  PrintOneStackNoLock(s, "Completed plan stack", m_completed_plans, desc_level,
                      include_internal);
  PrintOneStackNoLock(s, "Discarded plan stack", m_discarded_plans, desc_level,
                      include_internal);
  s.IndentLess();
}

void ThreadPlanStack::PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
                                          const PlanStack &stack,
                                          lldb::DescriptionLevel desc_level,
                                          bool include_internal) const {
  // If the stack is empty, just exit:
  if (stack.empty())
    return;

  // If we aren't printing internal plans, make sure this stack has at least
  // one public plan to print:
  bool any_public = false;
  if (!include_internal) {
    for (auto plan : stack) {
      if (!plan->GetPrivate()) {
        any_public = true;
        break;
      }
    }
  }

  if (include_internal || any_public) {
    int print_idx = 0;
    s.Indent();
    s << stack_name << ":\n";
    for (auto plan : stack) {
      if (!include_internal && plan->GetPrivate())
        continue;
      PrintPlanElement(s, plan, desc_level, print_idx++);
    }
  }
}

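// Checkpoints snapshot the completed plan stack so a caller can run the
// target (e.g. for an expression evaluation) and afterwards put the
// completed plans back the way they were. A hypothetical usage sketch:
//
//   size_t token = stack.CheckpointCompletedPlans();
//   // ... run the target, which may alter the completed plans ...
//   stack.RestoreCompletedPlanCheckpoint(token);
//   // or, to drop the snapshot: stack.DiscardCompletedPlanCheckpoint(token);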
size_t ThreadPlanStack::CheckpointCompletedPlans() {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  m_completed_plan_checkpoint++;
  m_completed_plan_store.insert(
      std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
  return m_completed_plan_checkpoint;
}

void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  auto result = m_completed_plan_store.find(checkpoint);
  assert(result != m_completed_plan_store.end() &&
         "Asked for a checkpoint that didn't exist");
  m_completed_plans.swap((*result).second);
  m_completed_plan_store.erase(result);
}

void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  m_completed_plan_store.erase(checkpoint);
}

void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
  // Tell the plan stacks that this thread is going away:
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  for (ThreadPlanSP plan : m_plans)
    plan->ThreadDestroyed();

  for (ThreadPlanSP plan : m_discarded_plans)
    plan->ThreadDestroyed();

  for (ThreadPlanSP plan : m_completed_plans)
    plan->ThreadDestroyed();

  // Now clear the current plan stacks:
  m_plans.clear();
  m_discarded_plans.clear();
  m_completed_plans.clear();

  // Push a ThreadPlanNull on the plan stack. That way we can continue
  // assuming that the plan stack is never empty, but if somebody errantly
  // asks questions of a destroyed thread without checking first whether it
  // is destroyed, they won't crash.
  if (thread != nullptr) {
    lldb::ThreadPlanSP null_plan_sp(new ThreadPlanNull(*thread));
    m_plans.push_back(null_plan_sp);
  }
}

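// Note: DidPush is deliberately called after the lock is released, since a
// plan's DidPush may itself push more plans onto this stack.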
void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
  // If the thread plan doesn't already have a tracer, give it its parent's
  // tracer:
  { // Scope for lock - DidPush often adds plans to the stack:
    llvm::sys::ScopedWriter guard(m_stack_mutex);
    // The first plan has to be a base plan:
    assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
           "Zeroth plan must be a base plan");

    if (!new_plan_sp->GetThreadPlanTracer()) {
      assert(!m_plans.empty());
      new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
    }
    m_plans.push_back(new_plan_sp);
  }
  new_plan_sp->DidPush();
}

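// Popping moves the top plan onto the completed plan stack; the base plan
// (element 0) is never popped.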
lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  assert(m_plans.size() > 1 && "Can't pop the base thread plan");

  // Note that moving the top element of the vector would leave it in an
  // undefined state, and break the guarantee that the stack's thread plans
  // are all valid.
  lldb::ThreadPlanSP plan_sp = m_plans.back();
  m_plans.pop_back();
  m_completed_plans.push_back(plan_sp);
  plan_sp->DidPop();
  return plan_sp;
}

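// Discarding is like popping, except the plan ends up on the discarded plan
// stack rather than the completed one, recording that it was abandoned
// rather than finished.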
lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  return DiscardPlanNoLock();
}

lldb::ThreadPlanSP ThreadPlanStack::DiscardPlanNoLock() {
  assert(m_plans.size() > 1 && "Can't discard the base thread plan");

  // Note that moving the top element of the vector would leave it in an
  // undefined state, and break the guarantee that the stack's thread plans
  // are all valid.
  lldb::ThreadPlanSP plan_sp = m_plans.back();
  m_plans.pop_back();
  m_discarded_plans.push_back(plan_sp);
  plan_sp->DidPop();
  return plan_sp;
}

// If the input plan is nullptr, discard all plans. Otherwise make sure this
// plan is in the stack, and if so discard up to and including it.
void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  int stack_size = m_plans.size();

  if (up_to_plan_ptr == nullptr) {
    for (int i = stack_size - 1; i > 0; i--)
      DiscardPlanNoLock();
    return;
  }

  bool found_it = false;
  for (int i = stack_size - 1; i > 0; i--) {
    if (m_plans[i].get() == up_to_plan_ptr) {
      found_it = true;
      break;
    }
  }

  if (found_it) {
    bool last_one = false;
    for (int i = stack_size - 1; i > 0 && !last_one; i--) {
      if (GetCurrentPlanNoLock().get() == up_to_plan_ptr)
        last_one = true;
      DiscardPlanNoLock();
    }
  }
}

void ThreadPlanStack::DiscardAllPlans() {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  int stack_size = m_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    DiscardPlanNoLock();
  }
}

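// Starting from the top of the stack, find the first controlling plan and
// ask its permission before discarding it and the plans it controls. A
// controlling plan that refuses stops the unwinding, and the base plan
// (element 0) is never discarded.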
void ThreadPlanStack::DiscardConsultingControllingPlans() {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  while (true) {
    int controlling_plan_idx;
    bool discard = true;

    // Find the first controlling plan, see if it wants discarding, and if
    // yes discard up to it.
    for (controlling_plan_idx = m_plans.size() - 1; controlling_plan_idx >= 0;
         controlling_plan_idx--) {
      if (m_plans[controlling_plan_idx]->IsControllingPlan()) {
        discard = m_plans[controlling_plan_idx]->OkayToDiscard();
        break;
      }
    }

    // If the controlling plan doesn't want to get discarded, then we're done.
    if (!discard)
      return;

    // First pop all the dependent plans:
    for (int i = m_plans.size() - 1; i > controlling_plan_idx; i--) {
      DiscardPlanNoLock();
    }

    // Now discard the controlling plan itself.
    // The bottom-most plan never gets discarded. "OkayToDiscard" for it
    // means discard its dependent plans, but not the plan itself.
    if (controlling_plan_idx > 0) {
      DiscardPlanNoLock();
    }
  }
}

lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  return GetCurrentPlanNoLock();
}

lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlanNoLock() const {
  assert(m_plans.size() != 0 && "There will always be a base plan.");
  return m_plans.back();
}

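// Returns the most recently completed plan, optionally skipping private
// (internal) plans so that only plans the user initiated are reported.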
lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  if (!skip_private)
    return m_completed_plans.back();

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ThreadPlanSP completed_plan_sp = m_completed_plans[i];
    if (!completed_plan_sp->GetPrivate())
      return completed_plan_sp;
  }
  return {};
}

lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
                                                   bool skip_private) const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  uint32_t idx = 0;

  for (lldb::ThreadPlanSP plan_sp : m_plans) {
    if (skip_private && plan_sp->GetPrivate())
      continue;
    if (idx == plan_idx)
      return plan_sp;
    idx++;
  }
  return {};
}

lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ValueObjectSP return_valobj_sp =
        m_completed_plans[i]->GetReturnValueObject();
    if (return_valobj_sp)
      return return_valobj_sp;
  }
  return {};
}

lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ExpressionVariableSP expression_variable_sp =
        m_completed_plans[i]->GetExpressionVariable();
    if (expression_variable_sp)
      return expression_variable_sp;
  }
  return {};
}

bool ThreadPlanStack::AnyPlans() const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  // There is always a base plan...
  return m_plans.size() > 1;
}

bool ThreadPlanStack::AnyCompletedPlans() const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  return !m_completed_plans.empty();
}

bool ThreadPlanStack::AnyDiscardedPlans() const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  return !m_discarded_plans.empty();
}

bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  for (auto plan : m_completed_plans) {
    if (plan.get() == in_plan)
      return true;
  }
  return false;
}

bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  for (auto plan : m_discarded_plans) {
    if (plan.get() == in_plan)
      return true;
  }
  return false;
}

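// Find the plan adjacent to current_plan: search the completed plans first,
// then fall back to the regular plan stack; returns nullptr if the plan is
// not on either stack.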
ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  if (current_plan == nullptr)
    return nullptr;

  // Look first in the completed plans; if the plan is there and there is a
  // completed plan above it, return that.
  int stack_size = m_completed_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    if (current_plan == m_completed_plans[i].get())
      return m_completed_plans[i - 1].get();
  }

  // If this is the first completed plan, the previous one is the current
  // (topmost) plan on the regular plan stack.
  if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
    return GetCurrentPlanNoLock().get();
  }

  // Otherwise look for it in the regular plans.
  stack_size = m_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    if (current_plan == m_plans[i].get())
      return m_plans[i - 1].get();
  }
  return nullptr;
}

ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  int stack_size = m_plans.size();

  for (int i = stack_size - 1; i > 0; i--) {
    if (m_plans[i]->GetKind() == ThreadPlan::eKindCallFunction)
      return m_plans[i].get();
  }
  return nullptr;
}

void ThreadPlanStack::ClearThreadCache() {
  llvm::sys::ScopedReader guard(m_stack_mutex);
  for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
    thread_plan_sp->ClearThreadCache();
}

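// The completed and discarded stacks only describe what happened during the
// last stop, so both are flushed before the thread runs again.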
void ThreadPlanStack::WillResume() {
  llvm::sys::ScopedWriter guard(m_stack_mutex);
  m_completed_plans.clear();
  m_discarded_plans.clear();
}

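// Reconcile the map with the process's current thread list: optionally
// create stacks (and queue base plans) for newly seen threads, and
// optionally drop the stacks of threads that have disappeared.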
void ThreadPlanStackMap::Update(ThreadList &current_threads,
                                bool delete_missing, bool check_for_new) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
  // First find all the new threads and add them to the map:
  if (check_for_new) {
    for (auto thread : current_threads.Threads()) {
      lldb::tid_t cur_tid = thread->GetID();
      if (!Find(cur_tid)) {
        AddThread(*thread);
        thread->QueueBasePlan(true);
      }
    }
  }

  // If we aren't reaping missing threads at this point, we are done.
  if (!delete_missing)
    return;

  // Otherwise scan for absent TIDs and remove their plan stacks:
  std::vector<lldb::tid_t> missing_threads;
  for (auto &thread_plans : m_plans_list) {
    lldb::tid_t cur_tid = thread_plans.first;
    ThreadSP thread_sp = current_threads.FindThreadByID(cur_tid);
    if (!thread_sp)
      missing_threads.push_back(cur_tid);
  }
  for (lldb::tid_t tid : missing_threads) {
    RemoveTID(tid);
  }
}

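// Dump the plan stacks for every thread in the map. With skip_unreported,
// stacks whose TID is no longer in the process's thread list are skipped;
// with condense_if_trivial, a thread with no interesting plans gets a
// one-line summary instead of three empty stacks.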
void ThreadPlanStackMap::DumpPlans(Stream &strm,
                                   lldb::DescriptionLevel desc_level,
                                   bool internal, bool condense_if_trivial,
                                   bool skip_unreported) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
  for (auto &elem : m_plans_list) {
    lldb::tid_t tid = elem.first;
    uint32_t index_id = 0;
    ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);

    if (skip_unreported) {
      if (!thread_sp)
        continue;
    }
    if (thread_sp)
      index_id = thread_sp->GetIndexID();

    if (condense_if_trivial) {
      if (!elem.second.AnyPlans() && !elem.second.AnyCompletedPlans() &&
          !elem.second.AnyDiscardedPlans()) {
        strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
        strm.IndentMore();
        strm.Indent();
        strm.Printf("No active thread plans\n");
        strm.IndentLess();
        return;
      }
    }

    strm.Indent();
    strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);

    elem.second.DumpThreadPlans(strm, desc_level, internal);
  }
}

bool ThreadPlanStackMap::DumpPlansForTID(Stream &strm, lldb::tid_t tid,
                                         lldb::DescriptionLevel desc_level,
                                         bool internal,
                                         bool condense_if_trivial,
                                         bool skip_unreported) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
  uint32_t index_id = 0;
  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);

  if (skip_unreported) {
    if (!thread_sp) {
      strm.Format("Unknown TID: {0}\n", tid);
      return false;
    }
  }

  if (thread_sp)
    index_id = thread_sp->GetIndexID();
  ThreadPlanStack *stack = Find(tid);
  if (!stack) {
    strm.Format("Unknown TID: {0}\n", tid);
    return false;
  }

  if (condense_if_trivial) {
    if (!stack->AnyPlans() && !stack->AnyCompletedPlans() &&
        !stack->AnyDiscardedPlans()) {
      strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
      strm.IndentMore();
      strm.Indent();
      strm.Printf("No active thread plans\n");
      strm.IndentLess();
      return true;
    }
  }

  strm.Indent();
  strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);

  stack->DumpThreadPlans(strm, desc_level, internal);
  return true;
}

bool ThreadPlanStackMap::PrunePlansForTID(lldb::tid_t tid) {
  // We only remove the plans for unreported TIDs.
  std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
  if (thread_sp)
    return false;

  return RemoveTID(tid);
}