/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"
/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	RET
SYM_FUNC_END(xen_irq_disable_direct)
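/*
 * Roughly equivalent C (a sketch for illustration only; it assumes the
 * mask byte addressed above is vcpu_info->evtchn_upcall_mask):
 *
 *	this_cpu_ptr(&xen_vcpu_info)->evtchn_upcall_mask = 1;
 */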

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	RET
SYM_FUNC_END(check_events)
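
/*
 * The registers saved above are exactly the caller-clobbered set of
 * the x86-64 SysV ABI, so the call into the C function
 * xen_force_evtchn_callback() clobbers nothing from the caller's
 * point of view: callers may treat check_events as preserving all
 * registers.
 */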

/*
 * Enable events.  This clears the event mask and then tests the
 * pending event status.  If there are pending events, enter the
 * hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Being preempted here doesn't matter, because the preemption
	 * itself will deal with any pending interrupts.  The pending
	 * check may then end up running on the wrong CPU, but that
	 * doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_irq_enable_direct)
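
/*
 * In rough C, the sequence above is (a sketch; the field names
 * evtchn_upcall_mask/evtchn_upcall_pending are assumed, not taken
 * from this file):
 *
 *	struct vcpu_info *v = this_cpu_ptr(&xen_vcpu_info);
 *
 *	v->evtchn_upcall_mask = 0;
 *	if (v->evtchn_upcall_pending)
 *		xen_force_evtchn_callback();
 */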

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
	RET
SYM_FUNC_END(xen_save_fl_direct)
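
/*
 * How the two instructions above produce X86_EFLAGS_IF: %ah is bits
 * 8-15 of %rax, and X86_EFLAGS_IF is 0x200 (bit 9).  setz sets %ah to
 * 1 (bit 8) iff the Xen event mask is clear, i.e. events are enabled;
 * addb %ah, %ah then doubles it, moving the flag into bit 9 of the
 * return value.
 */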

SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2)

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2_direct)
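
/*
 * The two readers differ only in indirection: xen_read_cr2() follows
 * the per-cpu xen_vcpu pointer to wherever the vcpu_info currently
 * lives, while the _direct variant reads the per-cpu vcpu_info copy
 * in place, matching the "direct forms" note at the top of this file.
 */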
.popsection

.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_ENTRY
	ENDBR
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
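
/*
 * For example, "xen_pv_trap asm_exc_int3" expands to (plus the
 * NOKPROBE annotation):
 *
 *	SYM_CODE_START(xen_asm_exc_int3)
 *		UNWIND_HINT_ENTRY
 *		ENDBR
 *		pop %rcx
 *		pop %r11
 *		jmp asm_exc_int3
 *	SYM_CODE_END(xen_asm_exc_int3)
 *
 * Xen pushes %rcx and %r11 on top of the iret-style exception frame;
 * the stub pops them so the native asm_exc_* entry point sees the
 * frame it expects.
 */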

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_CET
xen_pv_trap asm_exc_control_protection
#endif
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

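/*
 * Early (pre-IDT-setup) exception stubs.  Each entry strips the
 * Xen-pushed %rcx/%r11 and jumps to the corresponding native early
 * handler; the .fill pads every stub with int3 (0xcc) to the fixed
 * XEN_EARLY_IDT_HANDLER_SIZE stride so entries can be addressed by
 * vector number.
 */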
__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_UNDEFINED
	ENDBR
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
__FINIT

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip <-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	pushq $0	/* supply the "flags" word of the frame above */
	jmp hypercall_iret
SYM_CODE_END(xen_iret)
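
/*
 * Per the frame comment above, the hypercall page stub then pushes
 * %rcx, %r11 and %rax itself, completing the frame the hypervisor
 * consumes to restore the full user context.
 */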

/*
 * Xen PV doesn't use a trampoline stack: PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
 * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
 * for Xen PV would move %rsp up to the top of the kernel stack and leave the
 * IRET frame below %rsp, where it could be corrupted if an #NMI interrupted at
 * that point.  And having swapgs_restore_regs_and_return_to_usermode() push an
 * IRET frame at the same address would be useless anyway.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq $8, %rsp	/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */
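
/*
 * Because the kernel gs and stack are already in place, the stubs
 * below skip the native swapgs/stack-switch prologue and join the
 * common entry code at the *_after_hwframe labels, after popping the
 * Xen-pushed %rcx/%r11 and fixing up the cs/ss slots.
 */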

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)	/* ss */
	movq $__USER_CS, 1*8(%rsp)	/* cs */

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)	/* ss */
	movq $__USER32_CS, 1*8(%rsp)	/* cs */

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)	/* ss */
	movq $__USER32_CS, 1*8(%rsp)	/* cs */

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

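/*
 * Without compat support there are no 32-bit entry points: both stubs
 * collapse into one body that discards the Xen-pushed %rcx/%r11,
 * returns -ENOSYS in %rax and goes straight back to userspace via the
 * iret hypercall.
 */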
SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif /* CONFIG_IA32_EMULATION */