/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <asm/segment.h>
#include <asm/page_types.h>
#include <uapi/asm/ptrace.h>

#ifndef __ASSEMBLY__
#ifdef __i386__

struct pt_regs {
	/*
	 * NB: 32-bit x86 CPUs are inconsistent about what happens in the
	 * following cases (where %seg represents a segment register):
	 *
	 * - pushl %seg: some do a 16-bit write and leave the high
	 *   bits alone
	 * - movl %seg, [mem]: some do a 16-bit write despite the movl
	 * - IDT entry: some (e.g. 486) will leave the high bits of CS
	 *   and (if applicable) SS undefined.
	 *
	 * Fortunately, x86-32 doesn't read the high bits on POP or IRET,
	 * so we can just treat all of the segment registers as 16-bit
	 * values.
	 */
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned short ds;
	unsigned short __dsh;
	unsigned short es;
	unsigned short __esh;
	unsigned short fs;
	unsigned short __fsh;
	/* On interrupt, gs and __gsh store the vector number. */
	unsigned short gs;
	unsigned short __gsh;
	/* On interrupt, this is the error code. */
	unsigned long orig_ax;
	unsigned long ip;
	unsigned short cs;
	unsigned short __csh;
	unsigned long flags;
	unsigned long sp;
	unsigned short ss;
	unsigned short __ssh;
};
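
/*
 * Illustrative sketch (not compiled here): each 16-bit segment field
 * above is paired with a 16-bit padding field, so every member of the
 * 32-bit struct pt_regs stays long-aligned and offsets match what the
 * 32-bit entry code pushes. A compile-time check of that invariant
 * could look like:
 *
 *	BUILD_BUG_ON(offsetof(struct pt_regs, es) !=
 *		     offsetof(struct pt_regs, ds) + sizeof(unsigned long));
 */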

#else /* __i386__ */

struct pt_regs {
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel
 * entry unless a syscall needs a complete, fully filled "struct pt_regs".
 */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* These regs are callee-clobbered. Always saved on kernel entry. */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
/*
 * On syscall entry, this is the syscall number. On a CPU exception, this is
 * the error code. On a hardware interrupt, it is the IRQ number.
 */
	unsigned long orig_ax;
/* Return frame for iretq */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};
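
/*
 * Illustrative note (sketch, not part of the original header): the
 * fields are declared in the reverse of push order, so the
 * hardware-pushed iretq frame (ss, sp, flags, cs, ip) occupies the
 * highest offsets while r15, pushed last by the entry code, sits at
 * offset 0. For example:
 *
 *	BUILD_BUG_ON(offsetof(struct pt_regs, ss) !=
 *		     offsetof(struct pt_regs, sp) + sizeof(unsigned long));
 */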

#endif /* !__i386__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->ax = rc;
}
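
/*
 * Illustrative usage sketch (my_ret_handler is hypothetical): a
 * kretprobe return handler can read the probed function's return value
 * through the accessor instead of touching regs->ax directly, which
 * works unchanged on both 32-bit and 64-bit builds:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("returned %ld\n", (long)regs_return_value(regs));
 *		return 0;
 *	}
 */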

/*
 * user_mode(regs) determines whether a register set came from user
 * mode. On x86_32, this is true if V8086 mode was enabled OR if the
 * register set was from protected mode with an RPL-3 CS value. This
 * tricky test checks that with one comparison.
 *
 * On x86_64, vm86 mode is mercifully nonexistent, and we don't need
 * the extra check.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}
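
/*
 * Worked example of the single-comparison trick (illustrative):
 * USER_RPL is 3 and X86_VM_MASK is the EFLAGS VM bit (1 << 17), so on
 * x86_32:
 *
 *   - protected-mode user: (cs & 3) == 3, VM bit clear -> 3 >= 3, true
 *   - kernel mode:         (cs & 3) == 0, VM bit clear -> 0 >= 3, false
 *   - vm86 mode:           VM bit ORs in 0x20000       -> >= 3, true,
 *     even though a vm86 frame may carry a CS with RPL 0.
 */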

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}

static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
#ifndef CONFIG_PARAVIRT_XXL
	/*
	 * On non-paravirt systems, this is the only long mode CPL 3
	 * selector. We do not allow long mode selectors in the LDT.
	 */
	return regs->cs == __USER_CS;
#else
	/* Headers are too twisted for this to go in paravirt.h. */
	return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
#else /* !CONFIG_X86_64 */
	return false;
#endif
}
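
/*
 * Illustrative usage sketch: code that decodes user instructions can
 * branch on the bitness of the interrupted user context rather than on
 * CONFIG_X86_64 alone, since a 64-bit kernel may be running 32-bit
 * compat tasks:
 *
 *	int insn_bits = user_64bit_mode(regs) ? 64 : 32;
 */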

#ifdef CONFIG_X86_64
#define current_user_stack_pointer()	current_pt_regs()->sp
#define compat_user_stack_pointer()	current_pt_regs()->sp
#endif

#ifdef CONFIG_X86_32
extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
#endif

#define GET_IP(regs)	((regs)->ip)
#define GET_FP(regs)	((regs)->bp)
#define GET_USP(regs)	((regs)->sp)

#include <asm-generic/ptrace.h>

/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
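
/*
 * Illustrative usage sketch: the query helpers let generic tracing
 * code resolve a register by name once and then read it per-event;
 * "ax" here is just an example name:
 *
 *	int off = regs_query_register_offset("ax");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */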

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which register value is gotten.
 * @offset:	offset number of the register.
 *
 * regs_get_register returns the value of a register. The @offset is the
 * offset of the register within the struct pt_regs addressed by @regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
#ifdef CONFIG_X86_32
	/*
	 * Traps from the kernel do not save sp and ss.
	 * Use the helper function to retrieve sp.
	 */
	if (offset == offsetof(struct pt_regs, sp) &&
	    regs->cs == __KERNEL_CS)
		return kernel_stack_pointer(regs);

	/* The selector fields are 16-bit. */
	if (offset == offsetof(struct pt_regs, cs) ||
	    offset == offsetof(struct pt_regs, ss) ||
	    offset == offsetof(struct pt_regs, ds) ||
	    offset == offsetof(struct pt_regs, es) ||
	    offset == offsetof(struct pt_regs, fs) ||
	    offset == offsetof(struct pt_regs, gs))
		return *(u16 *)((unsigned long)regs + offset);
#endif
	return *(unsigned long *)((unsigned long)regs + offset);
}
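
/*
 * Illustrative usage sketch: callers normally pass a compile-time
 * offsetof() rather than a raw byte count, which keeps the
 * MAX_REG_OFFSET bounds check meaningful:
 *
 *	unsigned long ip = regs_get_register(regs,
 *					     offsetof(struct pt_regs, ip));
 */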

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static inline int regs_within_kernel_stack(struct pt_regs *regs,
					   unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
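
/*
 * Worked example of the mask test (illustrative, assuming
 * THREAD_SIZE == 8192): ~(THREAD_SIZE - 1) clears the low 13 bits, so
 * two addresses compare equal exactly when they fall within the same
 * THREAD_SIZE-aligned stack area:
 *
 *	0xc1234f00 & ~0x1fff == 0xc1234000
 *	0xc1235f00 & ~0x1fff == 0xc1234000	(same stack)
 *	0xc1236100 & ~0x1fff == 0xc1236000	(different stack)
 */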

/**
 * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth_addr() returns the address of the @n th entry of
 * the kernel stack which is specified by @regs. If the @n th entry is NOT in
 * the kernel stack, this returns NULL.
 */
static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return addr;
	else
		return NULL;
}

/* To avoid include hell, we can't include uaccess.h */
extern long probe_kernel_read(void *dst, const void *src, size_t size);

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr;
	unsigned long val;
	long ret;

	addr = regs_get_kernel_stack_nth_addr(regs, n);
	if (addr) {
		ret = probe_kernel_read(&val, addr, sizeof(val));
		if (!ret)
			return val;
	}
	return 0;
}
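
/*
 * Illustrative usage sketch: from a kprobe hit at a function's entry,
 * stack entry 0 is the word at the saved stack pointer (on x86-64 that
 * is the return address); the value is fetched via probe_kernel_read(),
 * so the read cannot fault:
 *
 *	unsigned long ret_addr = regs_get_kernel_stack_nth(regs, 0);
 */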

/**
 * regs_get_kernel_argument() - get Nth function argument in kernel
 * @regs:	pt_regs of that context
 * @n:		function argument number (start from 0)
 *
 * regs_get_kernel_argument() returns the @n th argument of the function call.
 * Note that this chooses the most likely register assignment; in some cases
 * it can be incorrect.
 * This is expected to be called from kprobes or ftrace with regs
 * where the top of stack is the return address.
 */
static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
						     unsigned int n)
{
	static const unsigned int argument_offs[] = {
#ifdef __i386__
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, cx),
#define NR_REG_ARGUMENTS 3
#else
		offsetof(struct pt_regs, di),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
#define NR_REG_ARGUMENTS 6
#endif
	};

	if (n >= NR_REG_ARGUMENTS) {
		n -= NR_REG_ARGUMENTS - 1;
		return regs_get_kernel_stack_nth(regs, n);
	} else
		return regs_get_register(regs, argument_offs[n]);
}
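
/*
 * Illustrative usage sketch (my_pre_handler is hypothetical): a kprobe
 * placed at a function's entry point can pull arguments without
 * open-coding the per-arch calling convention; on x86-64, n == 2 below
 * reads %rdx, while n == 6 would fall back to the stack:
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		unsigned long arg2 = regs_get_kernel_argument(regs, 2);
 *
 *		pr_info("third argument: %lx\n", arg2);
 *		return 0;
 *	}
 */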

#define arch_has_single_step()	(1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif

#define ARCH_HAS_USER_SINGLE_STEP_REPORT

/*
 * When hitting ptrace_stop(), we cannot return using SYSRET because
 * that does not restore the full CPU state, only a minimal set. The
 * ptracer can change arbitrary register values, which is usually okay
 * because the usual ptrace stops run off the signal delivery path, which
 * forces IRET; however, ptrace_event() stops happen in arbitrary places
 * in the kernel and don't force the IRET path.
 *
 * So force the IRET path after a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)				\
({									\
	force_iret();							\
	false;								\
})

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PTRACE_H */