1//===-- ThreadPlanStack.cpp -------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "lldb/Target/ThreadPlanStack.h"
10#include "lldb/Target/Process.h"
11#include "lldb/Target/Target.h"
12#include "lldb/Target/Thread.h"
13#include "lldb/Target/ThreadPlan.h"
14#include "lldb/Utility/Log.h"
15
16using namespace lldb;
17using namespace lldb_private;
18
19static void PrintPlanElement(Stream &s, const ThreadPlanSP &plan,
20 lldb::DescriptionLevel desc_level,
21 int32_t elem_idx) {
22 s.IndentMore();
23 s.Indent();
24 s.Printf(format: "Element %d: ", elem_idx);
25 plan->GetDescription(s: &s, level: desc_level);
26 s.EOL();
27 s.IndentLess();
28}
29
30ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
31 if (make_null) {
32 // The ThreadPlanNull doesn't do anything to the Thread, so this is actually
33 // still a const operation.
34 m_plans.push_back(
35 x: ThreadPlanSP(new ThreadPlanNull(const_cast<Thread &>(thread))));
36 }
37}
38
39void ThreadPlanStack::DumpThreadPlans(Stream &s,
40 lldb::DescriptionLevel desc_level,
41 bool include_internal) const {
42 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
43 s.IndentMore();
44 PrintOneStack(s, stack_name: "Active plan stack", stack: m_plans, desc_level, include_internal);
45 PrintOneStack(s, stack_name: "Completed plan stack", stack: m_completed_plans, desc_level,
46 include_internal);
47 PrintOneStack(s, stack_name: "Discarded plan stack", stack: m_discarded_plans, desc_level,
48 include_internal);
49 s.IndentLess();
50}
51
52void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
53 const PlanStack &stack,
54 lldb::DescriptionLevel desc_level,
55 bool include_internal) const {
56 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
57 // If the stack is empty, just exit:
58 if (stack.empty())
59 return;
60
61 // Make sure there are public completed plans:
62 bool any_public = false;
63 if (!include_internal) {
64 for (auto plan : stack) {
65 if (!plan->GetPrivate()) {
66 any_public = true;
67 break;
68 }
69 }
70 }
71
72 if (include_internal || any_public) {
73 int print_idx = 0;
74 s.Indent();
75 s << stack_name << ":\n";
76 for (auto plan : stack) {
77 if (!include_internal && plan->GetPrivate())
78 continue;
79 PrintPlanElement(s, plan, desc_level, elem_idx: print_idx++);
80 }
81 }
82}
83
84size_t ThreadPlanStack::CheckpointCompletedPlans() {
85 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
86 m_completed_plan_checkpoint++;
87 m_completed_plan_store.insert(
88 x: std::make_pair(x&: m_completed_plan_checkpoint, y&: m_completed_plans));
89 return m_completed_plan_checkpoint;
90}
91
92void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
93 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
94 auto result = m_completed_plan_store.find(x: checkpoint);
95 assert(result != m_completed_plan_store.end() &&
96 "Asked for a checkpoint that didn't exist");
97 m_completed_plans.swap(x&: (*result).second);
98 m_completed_plan_store.erase(position: result);
99}
100
101void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
102 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
103 m_completed_plan_store.erase(x: checkpoint);
104}
105
106void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
107 // Tell the plan stacks that this thread is going away:
108 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
109 for (ThreadPlanSP plan : m_plans)
110 plan->ThreadDestroyed();
111
112 for (ThreadPlanSP plan : m_discarded_plans)
113 plan->ThreadDestroyed();
114
115 for (ThreadPlanSP plan : m_completed_plans)
116 plan->ThreadDestroyed();
117
118 // Now clear the current plan stacks:
119 m_plans.clear();
120 m_discarded_plans.clear();
121 m_completed_plans.clear();
122
123 // Push a ThreadPlanNull on the plan stack. That way we can continue
124 // assuming that the plan stack is never empty, but if somebody errantly asks
125 // questions of a destroyed thread without checking first whether it is
126 // destroyed, they won't crash.
127 if (thread != nullptr) {
128 lldb::ThreadPlanSP null_plan_sp(new ThreadPlanNull(*thread));
129 m_plans.push_back(x: null_plan_sp);
130 }
131}
132
133void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
134 // If the thread plan doesn't already have a tracer, give it its parent's
135 // tracer:
136 // The first plan has to be a base plan:
137 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
138 assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
139 "Zeroth plan must be a base plan");
140
141 if (!new_plan_sp->GetThreadPlanTracer()) {
142 assert(!m_plans.empty());
143 new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
144 }
145 m_plans.push_back(x: new_plan_sp);
146 new_plan_sp->DidPush();
147}
148
149lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
150 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
151 assert(m_plans.size() > 1 && "Can't pop the base thread plan");
152
153 // Note that moving the top element of the vector would leave it in an
154 // undefined state, and break the guarantee that the stack's thread plans are
155 // all valid.
156 lldb::ThreadPlanSP plan_sp = m_plans.back();
157 m_plans.pop_back();
158 m_completed_plans.push_back(x: plan_sp);
159 plan_sp->DidPop();
160 return plan_sp;
161}
162
163lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
164 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
165 assert(m_plans.size() > 1 && "Can't discard the base thread plan");
166
167 // Note that moving the top element of the vector would leave it in an
168 // undefined state, and break the guarantee that the stack's thread plans are
169 // all valid.
170 lldb::ThreadPlanSP plan_sp = m_plans.back();
171 m_plans.pop_back();
172 m_discarded_plans.push_back(x: plan_sp);
173 plan_sp->DidPop();
174 return plan_sp;
175}
176
177// If the input plan is nullptr, discard all plans. Otherwise make sure this
178// plan is in the stack, and if so discard up to and including it.
179void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
180 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
181 int stack_size = m_plans.size();
182
183 if (up_to_plan_ptr == nullptr) {
184 for (int i = stack_size - 1; i > 0; i--)
185 DiscardPlan();
186 return;
187 }
188
189 bool found_it = false;
190 for (int i = stack_size - 1; i > 0; i--) {
191 if (m_plans[i].get() == up_to_plan_ptr) {
192 found_it = true;
193 break;
194 }
195 }
196
197 if (found_it) {
198 bool last_one = false;
199 for (int i = stack_size - 1; i > 0 && !last_one; i--) {
200 if (GetCurrentPlan().get() == up_to_plan_ptr)
201 last_one = true;
202 DiscardPlan();
203 }
204 }
205}
206
207void ThreadPlanStack::DiscardAllPlans() {
208 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
209 int stack_size = m_plans.size();
210 for (int i = stack_size - 1; i > 0; i--) {
211 DiscardPlan();
212 }
213}
214
// Repeatedly locate the topmost "controlling" plan and, if it consents via
// OkayToDiscard(), discard it together with everything stacked above it.
// Iterates because removing one controlling plan can expose another that
// also wants discarding.
void ThreadPlanStack::DiscardConsultingControllingPlans() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  while (true) {
    int controlling_plan_idx;
    bool discard = true;

    // Find the first controlling plan, see if it wants discarding, and if yes
    // discard up to it.
    for (controlling_plan_idx = m_plans.size() - 1; controlling_plan_idx >= 0;
         controlling_plan_idx--) {
      if (m_plans[controlling_plan_idx]->IsControllingPlan()) {
        discard = m_plans[controlling_plan_idx]->OkayToDiscard();
        break;
      }
    }

    // If the controlling plan doesn't want to get discarded, then we're done.
    // NOTE(review): this early return is the only exit from the while(true)
    // loop -- termination relies on some plan (presumably the base plan at
    // index 0, which is never itself discarded below) eventually answering
    // "no" here.  Confirm OkayToDiscard() is false for the base plan.
    if (!discard)
      return;

    // First pop all the dependent plans:
    for (int i = m_plans.size() - 1; i > controlling_plan_idx; i--) {
      DiscardPlan();
    }

    // Now discard the controlling plan itself.
    // The bottom-most plan never gets discarded. "OkayToDiscard" for it
    // means discard it's dependent plans, but not it...
    if (controlling_plan_idx > 0) {
      DiscardPlan();
    }
  }
}
248
249lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
250 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
251 assert(m_plans.size() != 0 && "There will always be a base plan.");
252 return m_plans.back();
253}
254
255lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
256 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
257 if (m_completed_plans.empty())
258 return {};
259
260 if (!skip_private)
261 return m_completed_plans.back();
262
263 for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
264 lldb::ThreadPlanSP completed_plan_sp;
265 completed_plan_sp = m_completed_plans[i];
266 if (!completed_plan_sp->GetPrivate())
267 return completed_plan_sp;
268 }
269 return {};
270}
271
272lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
273 bool skip_private) const {
274 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
275 uint32_t idx = 0;
276
277 for (lldb::ThreadPlanSP plan_sp : m_plans) {
278 if (skip_private && plan_sp->GetPrivate())
279 continue;
280 if (idx == plan_idx)
281 return plan_sp;
282 idx++;
283 }
284 return {};
285}
286
287lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
288 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
289 if (m_completed_plans.empty())
290 return {};
291
292 for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
293 lldb::ValueObjectSP return_valobj_sp;
294 return_valobj_sp = m_completed_plans[i]->GetReturnValueObject();
295 if (return_valobj_sp)
296 return return_valobj_sp;
297 }
298 return {};
299}
300
301lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
302 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
303 if (m_completed_plans.empty())
304 return {};
305
306 for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
307 lldb::ExpressionVariableSP expression_variable_sp;
308 expression_variable_sp = m_completed_plans[i]->GetExpressionVariable();
309 if (expression_variable_sp)
310 return expression_variable_sp;
311 }
312 return {};
313}
314bool ThreadPlanStack::AnyPlans() const {
315 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
316 // There is always a base plan...
317 return m_plans.size() > 1;
318}
319
320bool ThreadPlanStack::AnyCompletedPlans() const {
321 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
322 return !m_completed_plans.empty();
323}
324
325bool ThreadPlanStack::AnyDiscardedPlans() const {
326 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
327 return !m_discarded_plans.empty();
328}
329
330bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
331 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
332 for (auto plan : m_completed_plans) {
333 if (plan.get() == in_plan)
334 return true;
335 }
336 return false;
337}
338
339bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
340 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
341 for (auto plan : m_discarded_plans) {
342 if (plan.get() == in_plan)
343 return true;
344 }
345 return false;
346}
347
348ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
349 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
350 if (current_plan == nullptr)
351 return nullptr;
352
353 // Look first in the completed plans, if the plan is here and there is
354 // a completed plan above it, return that.
355 int stack_size = m_completed_plans.size();
356 for (int i = stack_size - 1; i > 0; i--) {
357 if (current_plan == m_completed_plans[i].get())
358 return m_completed_plans[i - 1].get();
359 }
360
361 // If this is the first completed plan, the previous one is the
362 // bottom of the regular plan stack.
363 if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
364 return GetCurrentPlan().get();
365 }
366
367 // Otherwise look for it in the regular plans.
368 stack_size = m_plans.size();
369 for (int i = stack_size - 1; i > 0; i--) {
370 if (current_plan == m_plans[i].get())
371 return m_plans[i - 1].get();
372 }
373 return nullptr;
374}
375
376ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
377 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
378 int stack_size = m_plans.size();
379
380 for (int i = stack_size - 1; i > 0; i--) {
381 if (m_plans[i]->GetKind() == ThreadPlan::eKindCallFunction)
382 return m_plans[i].get();
383 }
384 return nullptr;
385}
386
387void ThreadPlanStack::ClearThreadCache() {
388 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
389 for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
390 thread_plan_sp->ClearThreadCache();
391}
392
393void ThreadPlanStack::WillResume() {
394 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
395 m_completed_plans.clear();
396 m_discarded_plans.clear();
397}
398
399void ThreadPlanStackMap::Update(ThreadList &current_threads,
400 bool delete_missing,
401 bool check_for_new) {
402
403 std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
404 // Now find all the new threads and add them to the map:
405 if (check_for_new) {
406 for (auto thread : current_threads.Threads()) {
407 lldb::tid_t cur_tid = thread->GetID();
408 if (!Find(tid: cur_tid)) {
409 AddThread(thread&: *thread);
410 thread->QueueBasePlan(abort_other_plans: true);
411 }
412 }
413 }
414
415 // If we aren't reaping missing threads at this point,
416 // we are done.
417 if (!delete_missing)
418 return;
419 // Otherwise scan for absent TID's.
420 std::vector<lldb::tid_t> missing_threads;
421 // If we are going to delete plans from the plan stack,
422 // then scan for absent TID's:
423 for (auto &thread_plans : m_plans_list) {
424 lldb::tid_t cur_tid = thread_plans.first;
425 ThreadSP thread_sp = current_threads.FindThreadByID(tid: cur_tid);
426 if (!thread_sp)
427 missing_threads.push_back(x: cur_tid);
428 }
429 for (lldb::tid_t tid : missing_threads) {
430 RemoveTID(tid);
431 }
432}
433
434void ThreadPlanStackMap::DumpPlans(Stream &strm,
435 lldb::DescriptionLevel desc_level,
436 bool internal, bool condense_if_trivial,
437 bool skip_unreported) {
438 std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
439 for (auto &elem : m_plans_list) {
440 lldb::tid_t tid = elem.first;
441 uint32_t index_id = 0;
442 ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
443
444 if (skip_unreported) {
445 if (!thread_sp)
446 continue;
447 }
448 if (thread_sp)
449 index_id = thread_sp->GetIndexID();
450
451 if (condense_if_trivial) {
452 if (!elem.second.AnyPlans() && !elem.second.AnyCompletedPlans() &&
453 !elem.second.AnyDiscardedPlans()) {
454 strm.Printf(format: "thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
455 strm.IndentMore();
456 strm.Indent();
457 strm.Printf(format: "No active thread plans\n");
458 strm.IndentLess();
459 return;
460 }
461 }
462
463 strm.Indent();
464 strm.Printf(format: "thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);
465
466 elem.second.DumpThreadPlans(s&: strm, desc_level, include_internal: internal);
467 }
468}
469
470bool ThreadPlanStackMap::DumpPlansForTID(Stream &strm, lldb::tid_t tid,
471 lldb::DescriptionLevel desc_level,
472 bool internal,
473 bool condense_if_trivial,
474 bool skip_unreported) {
475 std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
476 uint32_t index_id = 0;
477 ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
478
479 if (skip_unreported) {
480 if (!thread_sp) {
481 strm.Format(format: "Unknown TID: {0}", args&: tid);
482 return false;
483 }
484 }
485
486 if (thread_sp)
487 index_id = thread_sp->GetIndexID();
488 ThreadPlanStack *stack = Find(tid);
489 if (!stack) {
490 strm.Format(format: "Unknown TID: {0}\n", args&: tid);
491 return false;
492 }
493
494 if (condense_if_trivial) {
495 if (!stack->AnyPlans() && !stack->AnyCompletedPlans() &&
496 !stack->AnyDiscardedPlans()) {
497 strm.Printf(format: "thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
498 strm.IndentMore();
499 strm.Indent();
500 strm.Printf(format: "No active thread plans\n");
501 strm.IndentLess();
502 return true;
503 }
504 }
505
506 strm.Indent();
507 strm.Printf(format: "thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);
508
509 stack->DumpThreadPlans(s&: strm, desc_level, include_internal: internal);
510 return true;
511}
512
513bool ThreadPlanStackMap::PrunePlansForTID(lldb::tid_t tid) {
514 // We only remove the plans for unreported TID's.
515 std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
516 ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
517 if (thread_sp)
518 return false;
519
520 return RemoveTID(tid);
521}
522
// Source: lldb/source/Target/ThreadPlanStack.cpp