/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>		/* For unlikely. */
#include <linux/sched.h>		/* For struct task_struct. */
#include <linux/sched/signal.h>		/* For send_sig(), same_thread_group(), etc. */
#include <linux/err.h>			/* For IS_ERR_VALUE. */
#include <linux/bug.h>			/* For BUG_ON. */
#include <linux/pid_namespace.h>	/* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>

extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
			    void *buf, int len, unsigned int gup_flags);

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple.  When a task is running it owns its own task->ptrace
 * flags.  When a task is stopped the ptracer owns task->ptrace.
 */

#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */

#define PT_OPT_FLAG_SHIFT	3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)	(1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD		PT_EVENT_FLAG(0)
#define PT_TRACE_FORK		PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK		PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE		PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC		PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT		PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP	PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)

#define PT_EXITKILL		(PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP	(PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)

extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent,
			  const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_NOAUDIT	0x04
#define PTRACE_MODE_FSCREDS	0x08
#define PTRACE_MODE_REALCREDS	0x10

/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)

/**
 * ptrace_may_access - check whether the caller is permitted to access
 * a target task.
 * @task: target task
 * @mode: selects type of access and caller credentials
 *
 * Returns true on success, false on denial.
 *
 * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
 * be set in @mode to specify whether the access was requested through
 * a filesystem syscall (should use effective capabilities and fsuid
 * of the caller) or through an explicit syscall such as
 * process_vm_writev or ptrace (and should use the real credentials).
 */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
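
/*
 * Example (illustrative sketch, not part of this API; example_show_task()
 * and example_emit_state() are hypothetical): a /proc-style read handler
 * would check the target with filesystem credentials before exposing its
 * state.
 *
 *	static int example_show_task(struct task_struct *task)
 *	{
 *		if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *			return -EACCES;
 *		return example_emit_state(task);
 *	}
 */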

static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be
 * kept live only by RCU.  During exec, this may be called with task_lock()
 * held on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}
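
/*
 * Example (illustrative sketch): the returned tracer is only guaranteed to
 * stay live inside the RCU read-side critical section, so use it (or take
 * a reference) before unlocking.
 *
 *	rcu_read_lock();
 *	tracer = ptrace_parent(task);
 *	if (tracer)
 *		tracer_pid = task_pid_nr(tracer);
 *	rcu_read_unlock();
 */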

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}
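
/*
 * Example (illustrative sketch; prepare_exit_report() is a hypothetical
 * helper): skip any work preparing an event report when the tracer never
 * asked for that event.
 *
 *	if (ptrace_event_enabled(current, PTRACE_EVENT_EXIT))
 *		prepare_exit_report();
 */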

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @message: value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	} else if (event == PTRACE_EVENT_EXEC) {
		/* legacy EXEC report via SIGTRAP */
		if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
			send_sig(SIGTRAP, current, 0);
	}
}
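
/*
 * Example (illustrative sketch): an exit path can report its exit code so
 * that a tracer which enabled PTRACE_O_TRACEEXIT can read it back via
 * PTRACE_GETEVENTMSG.
 *
 *	ptrace_event(PTRACE_EVENT_EXIT, code);
 */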

/**
 * ptrace_event_pid - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @pid: process identifier for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @pid
 * to the ptrace parent.  @pid is reported as the pid_t seen from the
 * ptrace parent's pid namespace.
 *
 * Called without locks.
 */
static inline void ptrace_event_pid(int event, struct pid *pid)
{
	/*
	 * FIXME: There's a potential race if a ptracer in a different pid
	 * namespace than parent attaches between computing message below and
	 * when we acquire tasklist_lock in ptrace_stop().  If this happens,
	 * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
	 */
	unsigned long message = 0;
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(rcu_dereference(current->parent));
	if (ns)
		message = pid_nr_ns(pid, ns);
	rcu_read_unlock();

	ptrace_event(event, message);
}
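
/*
 * Example (illustrative sketch): a fork path reports the new child's pid;
 * this helper translates it into the tracer's pid namespace first.
 *
 *	ptrace_event_pid(PTRACE_EVENT_FORK, task_pid(child));
 */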

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child: new child task
 * @ptrace: true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent, current->ptracer_cred);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);
	} else
		child->ptracer_cred = NULL;
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task: task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the
 * macro is a no-op and the spurious error condition needs to be filtered out
 * by some other means (e.g., in user-level, by passing an extra argument to
 * the syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
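
/*
 * Example (illustrative sketch; the syscall and its lookup helper are
 * hypothetical): a handler whose successful result can look like an error
 * value clears the per-arch error flag before returning.
 *
 *	SYSCALL_DEFINE1(example_get_cookie, int, slot)
 *	{
 *		long cookie = example_lookup_cookie(slot);
 *
 *		force_successful_syscall_return();
 *		return cookie;	(may legitimately be "negative")
 *	}
 */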

#ifndef is_syscall_success
/*
 * On most systems we can tell if a syscall is a success based on if the retval
 * is an error value.  On some systems like ia64 and powerpc they have different
 * indicators of success/failure and must define their own.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
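
/*
 * Example (illustrative sketch; example_count_success() is hypothetical):
 * a tracing hook classifies a completed syscall from its saved registers.
 *
 *	static void example_audit_exit(struct pt_regs *regs)
 *	{
 *		if (is_syscall_success(regs))
 *			example_count_success(regs_return_value(regs));
 *	}
 */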

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()	(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */
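
/*
 * Example (illustrative sketch of what an architecture might provide; the
 * TIF flag and example_set_trap_flag() are assumptions, not a real port):
 *
 *	#define arch_has_single_step()	(1)
 *
 *	void user_enable_single_step(struct task_struct *task)
 *	{
 *		set_tsk_thread_flag(task, TIF_SINGLESTEP);
 *		example_set_trap_flag(task);	(arm the hardware trace bit)
 *	}
 */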

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()	(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				     struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
					    struct pt_regs *regs, siginfo_t *info)
{
	info->si_signo = SIGTRAP;
}
#endif
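
/*
 * Example (illustrative sketch): an architecture override would typically
 * fill in the full trap details rather than just the signal number, e.g.
 *
 *	info->si_signo = SIGTRAP;
 *	info->si_code = TRAP_TRACE;
 *	info->si_addr = (void __user *)instruction_pointer(regs);
 */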

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 *
 * This is guaranteed to be invoked once before a task stops for ptrace and
 * may include arch-specific operations necessary prior to a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif
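
/*
 * Example (illustrative sketch of the expected caller pattern in the stop
 * path): drop the siglock only when the arch actually needs the hook.
 *
 *	if (arch_ptrace_stop_needed(exit_code, info)) {
 *		spin_unlock_irq(&current->sighand->siglock);
 *		arch_ptrace_stop(exit_code, info);
 *		spin_lock_irq(&current->sighand->siglock);
 *	}
 */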

#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

/*
 * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
 * on *all* architectures; the only reason to have a per-arch definition
 * is optimisation.
 */
#ifndef signal_pt_regs
#define signal_pt_regs() task_pt_regs(current)
#endif

#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);
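
/*
 * Example (illustrative sketch; example_report() is hypothetical): a
 * /proc-style reporter samples the target's current syscall state; a zero
 * return means the snapshot is self-consistent.
 *
 *	long nr;
 *	unsigned long args[6], sp, pc;
 *
 *	if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0)
 *		example_report(nr, args, sp, pc);
 */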

#endif