/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <uapi/asm/kvm_para.h>

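/* Set up the kvmclock paravirtual clocksource during early boot. */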
extern void kvmclock_init(void);

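/*
 * Returns true if the host paused this guest since the last call (the
 * PVCLOCK_GUEST_STOPPED flag was set) and clears the flag; the watchdog
 * uses this to suppress false soft-lockup reports after a pause.
 */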
#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#define KVM_HYPERCALL \
	ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction. The hypervisor may replace it with something else, but only
 * these two instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax. No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */
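
/*
 * Example (illustrative, not part of this header): KVM_HC_KICK_CPU takes
 * two arguments, so a guest issues it as
 *
 *	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 *
 * which loads rax = KVM_HC_KICK_CPU, rbx = flags and rcx = apicid,
 * executes vmcall/vmmcall, and leaves the hypervisor's return value
 * in rax.
 */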

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1), "c" (p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1), "c" (p2), "d" (p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1), "c" (p2), "d" (p3), "S" (p4)
		     : "memory");
	return ret;
}

#ifdef CONFIG_KVM_GUEST
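/*
 * Guest detection: kvm_para_available() checks for the KVM CPUID
 * signature; kvm_arch_para_features() and kvm_arch_para_hints() return
 * the feature and hint bits advertised in KVM's CPUID leaves.
 */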
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
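
/*
 * Asynchronous page faults: while the host pages in a missing page it
 * can inject a special #PF carrying a token. do_async_page_fault() is
 * the handler entry, kvm_read_and_reset_pf_reason() fetches the reason
 * from the shared APF area, and the _wait()/_wake() pair parks and
 * resumes the affected task around the matching token.
 * kvm_disable_steal_time() turns off steal-time accounting on this CPU.
 */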
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
void do_async_page_fault(struct pt_regs *regs, unsigned long error_code);

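/*
 * With CONFIG_PARAVIRT_SPINLOCKS, kvm_spinlock_init() switches the
 * queued spinlock slowpath to hypercall-based kick/wait.
 */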
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait(T, I) do {} while (0)
#define kvm_async_pf_task_wake(T) do {} while (0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
}
static inline void kvm_disable_steal_time(void)
{
}
#endif /* CONFIG_KVM_GUEST */

#endif /* _ASM_X86_KVM_PARA_H */