1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
2 | #ifndef _UAPI_ASM_X86_KVM_PARA_H |
3 | #define _UAPI_ASM_X86_KVM_PARA_H |
4 | |
5 | #include <linux/types.h> |
6 | |
/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
 * should be used to determine that a VM is running under KVM.
 */
#define KVM_CPUID_SIGNATURE 0x40000000
/* 12-byte signature split across ebx/ecx/edx, NUL-padded. */
#define KVM_SIGNATURE "KVMKVMKVM\0\0\0"

/* This CPUID returns two feature bitmaps in eax, edx. Before enabling
 * a particular paravirtualization, the appropriate feature bit should
 * be checked in eax. The performance hint feature bit should be checked
 * in edx.
 */
#define KVM_CPUID_FEATURES 0x40000001
/* Feature bit positions (bit numbers, not masks) in eax. */
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 1
#define KVM_FEATURE_MMU_OP 2
/* This indicates that the new set of kvmclock msrs
 * are available. The use of 0x11 and 0x12 is deprecated
 */
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4
#define KVM_FEATURE_STEAL_TIME 5
#define KVM_FEATURE_PV_EOI 6
#define KVM_FEATURE_PV_UNHALT 7
/* NOTE(review): bit 8 is deliberately skipped here — presumably reserved;
 * confirm against the KVM CPUID documentation before assigning it.
 */
#define KVM_FEATURE_PV_TLB_FLUSH 9
#define KVM_FEATURE_ASYNC_PF_VMEXIT 10
#define KVM_FEATURE_PV_SEND_IPI 11
#define KVM_FEATURE_POLL_CONTROL 12
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14
#define KVM_FEATURE_MSI_EXT_DEST_ID 15
#define KVM_FEATURE_HC_MAP_GPA_RANGE 16
#define KVM_FEATURE_MIGRATION_CONTROL 17

/* Performance hint bit, reported in edx of KVM_CPUID_FEATURES. */
#define KVM_HINTS_REALTIME 0

/* The last 8 bits are used to indicate how to interpret the flags field
 * in pvclock structure. If no bits are set, all flags are ignored.
 */
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
46 | |
/* Legacy kvmclock MSRs; deprecated in favour of the *_NEW variants below
 * (see the KVM_FEATURE_CLOCKSOURCE2 note above).
 */
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* Low bit used to enable the per-feature MSRs below. */
#define KVM_MSR_ENABLED 1
/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
#define MSR_KVM_POLL_CONTROL 0x4b564d05
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
#define MSR_KVM_MIGRATION_CONTROL 0x4b564d08
61 | |
/* Guest/host shared steal-time record (see MSR_KVM_STEAL_TIME and
 * KVM_FEATURE_STEAL_TIME above). ABI layout — do not reorder or resize.
 */
struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u8 preempted;		/* flag byte; see KVM_VCPU_* bits below */
	__u8 u8_pad[3];		/* explicit padding to the next __u32 boundary */
	__u32 pad[11];		/* pads the structure to 64 bytes total */
};

/* Bit values for the 'preempted' byte above (presumably — confirm
 * against the KVM MSR documentation).
 */
#define KVM_VCPU_PREEMPTED (1 << 0)
#define KVM_VCPU_FLUSH_TLB (1 << 1)
73 | |
/* Value for kvm_clock_pairing.flags. */
#define KVM_CLOCK_PAIRING_WALLCLOCK 0
/* A (sec, nsec) timestamp paired with a TSC value. ABI layout. */
struct kvm_clock_pairing {
	__s64 sec;	/* seconds component */
	__s64 nsec;	/* nanoseconds component */
	__u64 tsc;	/* TSC reading associated with sec/nsec */
	__u32 flags;	/* KVM_CLOCK_PAIRING_* */
	__u32 pad[9];	/* pads the structure to 64 bytes total */
};
82 | |
/* Bit layout of the MSR_KVM_STEAL_TIME value: bit 0 is the enable bit,
 * bits 1..5 are reserved, bits 6..63 hold the (64-byte-aligned) address.
 */
#define KVM_STEAL_ALIGNMENT_BITS 5
/* Mask of bits 6..63 (-1ULL << 6). */
#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
/* Mask of bits 1..5 (0x3e): must be written as zero. */
#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)

#define KVM_MAX_MMU_OP_BATCH 32

/* Flag bits for MSR_KVM_ASYNC_PF_EN. */
#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
#define KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (1 << 2)
#define KVM_ASYNC_PF_DELIVERY_AS_INT (1 << 3)
93 | |
/* MSR_KVM_ASYNC_PF_INT: low 8 bits select the notification vector. */
#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0)

/* MSR_KVM_MIGRATION_CONTROL */
#define KVM_MIGRATION_READY (1 << 0)

/* KVM_HC_MAP_GPA_RANGE: page-size and encryption-status flag bits. */
#define KVM_MAP_GPA_RANGE_PAGE_SZ_4K 0
#define KVM_MAP_GPA_RANGE_PAGE_SZ_2M (1 << 0)
#define KVM_MAP_GPA_RANGE_PAGE_SZ_1G (1 << 1)
/* Parenthesize the argument so expressions such as 'a ? b : c' expand
 * correctly (the '<< 4' would otherwise bind to only part of them).
 */
#define KVM_MAP_GPA_RANGE_ENC_STAT(n) ((n) << 4)
#define KVM_MAP_GPA_RANGE_ENCRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(1)
#define KVM_MAP_GPA_RANGE_DECRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(0)
107 | |
/* Operations for KVM_HC_MMU_OP (selected via kvm_mmu_op_header.op). */
#define KVM_MMU_OP_WRITE_PTE 1
#define KVM_MMU_OP_FLUSH_TLB 2
#define KVM_MMU_OP_RELEASE_PT 3
112 | |
/* Payload for KVM_HC_MMU_OP: common header carried by every MMU-op
 * payload below. The struct tag and member names were lost in this copy
 * ('struct { __u32 ; ... }' is not valid C and the tag is referenced by
 * the kvm_mmu_op_* structs); restored to the upstream definition.
 */
struct kvm_mmu_op_header {
	__u32 op;	/* one of KVM_MMU_OP_* */
	__u32 pad;	/* explicit padding; keeps 8-byte size/alignment */
};
118 | |
119 | struct kvm_mmu_op_write_pte { |
120 | struct kvm_mmu_op_header ; |
121 | __u64 pte_phys; |
122 | __u64 pte_val; |
123 | }; |
124 | |
125 | struct kvm_mmu_op_flush_tlb { |
126 | struct kvm_mmu_op_header ; |
127 | }; |
128 | |
129 | struct kvm_mmu_op_release_pt { |
130 | struct kvm_mmu_op_header ; |
131 | __u64 pt_phys; |
132 | }; |
133 | |
/* Async-PF event codes (presumably reported via the struct below —
 * confirm against the KVM MSR documentation).
 */
#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
#define KVM_PV_REASON_PAGE_READY 2

/* Guest/host shared async page-fault area (see MSR_KVM_ASYNC_PF_EN and
 * MSR_KVM_ASYNC_PF_INT). ABI layout — do not reorder or resize.
 */
struct kvm_vcpu_pv_apf_data {
	/* Used for 'page not present' events delivered via #PF */
	__u32 flags;

	/* Used for 'page ready' events delivered via interrupt notification */
	__u32 token;

	__u8 pad[56];	/* pads flags+token+pad to 64 bytes */
	__u32 enabled;
};
147 | |
/* PV EOI control: bit 0 of the MSR_KVM_PV_EOI_EN value. */
#define KVM_PV_EOI_BIT 0
#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
#define KVM_PV_EOI_DISABLED 0x0
152 | |
153 | #endif /* _UAPI_ASM_X86_KVM_PARA_H */ |
154 | |