/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AT_SYSINFO entry point
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

	.text
	.globl	__kernel_vsyscall
	.type	__kernel_vsyscall,@function
	ALIGN
__kernel_vsyscall:
	CFI_STARTPROC
	/*
	 * Reshuffle regs so that any of the entry instructions will
	 * preserve enough state.
	 *
	 * A really nice entry sequence would be:
	 *	pushl %edx
	 *	pushl %ecx
	 *	movl %esp, %ecx
	 *
	 * Unfortunately, naughty Android versions between July and December
	 * 2015 actually hardcode the traditional Linux SYSENTER entry
	 * sequence.  That is severely broken for a number of reasons (ask
	 * anyone with an AMD CPU, for example).  Nonetheless, we try to keep
	 * it working approximately as well as it ever worked.
	 *
	 * This link may elucidate some of the history:
	 *   https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
	 * Personally, I find it hard to understand what's going on there.
	 *
	 * Note to future userspace developers: DO NOT USE SYSENTER IN YOUR
	 * CODE.  Execute an indirect call to the address in the AT_SYSINFO
	 * auxv entry.  That is the ONLY correct way to make a fast 32-bit
	 * system call on Linux.  (Open-coding int $0x80 is also fine, but
	 * it's slow.)
	 */
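	/*
	 * For illustration only (not part of the entry path): a userspace
	 * caller is expected to fetch the AT_SYSINFO value from the aux
	 * vector (e.g. via getauxval(AT_SYSINFO)), stash it somewhere, and
	 * call through it using the usual int $0x80 register convention
	 * (%eax = syscall number, %ebx/%ecx/%edx/%esi/%edi/%ebp = args).
	 * A minimal sketch, assuming the caller keeps the pointer in a
	 * hypothetical vsyscall_ptr variable:
	 *
	 *	movl	$20, %eax		# __NR_getpid (no arguments)
	 *	call	*vsyscall_ptr		# address from AT_SYSINFO
	 *
	 * The push/pop sequence below keeps %ecx, %edx and %ebp intact for
	 * such callers no matter which entry instruction ends up being used.
	 */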
	pushl	%ecx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ecx, 0
	pushl	%edx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		edx, 0
	pushl	%ebp
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ebp, 0
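	/*
	 * Stack layout at this point, from low to high address:
	 *
	 *	 0(%esp): saved %ebp
	 *	 4(%esp): saved %edx
	 *	 8(%esp): saved %ecx
	 *	12(%esp): return address into the caller
	 */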

#define SYSENTER_SEQUENCE	"movl %esp, %ebp; sysenter"
#define SYSCALL_SEQUENCE	"movl %ecx, %ebp; syscall"

#ifdef CONFIG_X86_64
	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
	ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
		      SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
#else
	ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
#endif

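	/*
	 * If neither fast-path feature is available, the alternative above
	 * is left as NOP padding and execution simply falls through to the
	 * int $0x80 slow path below.
	 */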
	/* Enter using int $0x80 */
	int	$0x80
SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
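	/*
	 * Fast system calls resume here as well: the kernel aims the return
	 * IP of SYSENTER/SYSCALL entries at int80_landing_pad, so a syscall
	 * restart that rewinds the IP by two bytes re-executes the
	 * int $0x80 above.
	 */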

	/*
	 * Restore EDX and ECX in case they were clobbered.  EBP is not
	 * clobbered (the kernel restores it), but it's cleaner and
	 * probably faster to pop it than to adjust ESP using addl.
	 */
	popl	%ebp
	CFI_RESTORE		ebp
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%edx
	CFI_RESTORE		edx
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%ecx
	CFI_RESTORE		ecx
	CFI_ADJUST_CFA_OFFSET	-4
	RET
	CFI_ENDPROC

	.size __kernel_vsyscall,.-__kernel_vsyscall
	.previous
