/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/linkage.h>

#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>
#include <asm/segment.h>
#include <asm/trapnr.h>

	.text
	.code32
SYM_FUNC_START(get_sev_encryption_bit)
	push	%ebx

	movl	$0x80000000, %eax	/* CPUID to check the highest leaf */
	cpuid
	cmpl	$0x8000001f, %eax	/* See if 0x8000001f is available */
	jb	.Lno_sev

	/*
	 * Check for the SEV feature:
	 *   CPUID Fn8000_001F[EAX] - Bit 1: SEV is supported
	 *   CPUID Fn8000_001F[EBX] - Bits 5:0: pagetable bit position
	 *                            used to indicate encryption
	 */
	movl	$0x8000001f, %eax
	cpuid
	bt	$1, %eax		/* Check if SEV is available */
	jnc	.Lno_sev

	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
	rdmsr
	bt	$MSR_AMD64_SEV_ENABLED_BIT, %eax /* Check if SEV is active */
	jnc	.Lno_sev

	movl	%ebx, %eax
	andl	$0x3f, %eax		/* Return the encryption bit location */
	jmp	.Lsev_exit

.Lno_sev:
	xor	%eax, %eax

.Lsev_exit:
	pop	%ebx
	RET
SYM_FUNC_END(get_sev_encryption_bit)

/**
 * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using
 *		      the GHCB MSR protocol
 *
 * @%eax:	Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX)
 * @%edx:	CPUID Function
 *
 * Returns 0 in %eax on success, non-zero on failure
 * %edx returns CPUID value on success
 */
SYM_CODE_START_LOCAL(sev_es_req_cpuid)
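	/*
	 * Build the CPUID request in GHCBData per the GHCB MSR protocol:
	 *   Bits [63:32] - CPUID function (%edx, high half of the MSR)
	 *   Bits [31:30] - register to request (%eax)
	 *   Bits [11:0]  - 0x004, the CPUID request code
	 */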
	shll	$30, %eax
	orl	$0x00000004, %eax
	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
	wrmsr
	rep; vmmcall		# VMGEXIT
	rdmsr

	/* Check response */
	movl	%eax, %ecx
	andl	$0x3ffff000, %ecx	# Bits [12-29] MBZ
	jnz	2f

	/* Check return code - 0x005 is the CPUID response code */
	andl	$0xfff, %eax
	cmpl	$5, %eax
	jne	2f

	/* All good - return success */
	xorl	%eax, %eax
1:
	RET
2:
	movl	$-1, %eax
	jmp	1b
SYM_CODE_END(sev_es_req_cpuid)

SYM_CODE_START_LOCAL(startup32_vc_handler)
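	/*
	 * Stack layout after the four register pushes below
	 * (offsets from %esp):
	 *   0: %edx   4: %ecx   8: %ebx  12: %eax
	 *  16: error code  20: EIP  24: CS  28: EFLAGS
	 */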
	pushl	%eax
	pushl	%ebx
	pushl	%ecx
	pushl	%edx

	/* Keep CPUID function in %ebx */
	movl	%eax, %ebx

	/* Check if error-code == SVM_EXIT_CPUID (0x72) */
	cmpl	$0x72, 16(%esp)
	jne	.Lfail

	movl	$0, %eax		# Request CPUID[fn].EAX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 12(%esp)		# Store result

	movl	$1, %eax		# Request CPUID[fn].EBX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 8(%esp)		# Store result

	movl	$2, %eax		# Request CPUID[fn].ECX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 4(%esp)		# Store result

	movl	$3, %eax		# Request CPUID[fn].EDX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 0(%esp)		# Store result

	/*
	 * Sanity check CPUID results from the Hypervisor. See comment in
	 * do_vc_no_ghcb() for more details on why this is necessary.
	 */

	/* Fail if SEV leaf not available in CPUID[0x80000000].EAX */
	cmpl	$0x80000000, %ebx
	jne	.Lcheck_sev
	cmpl	$0x8000001f, 12(%esp)
	jb	.Lfail
	jmp	.Ldone

.Lcheck_sev:
	/* Fail if SEV bit not set in CPUID[0x8000001f].EAX[1] */
	cmpl	$0x8000001f, %ebx
	jne	.Ldone
	btl	$1, 12(%esp)
	jnc	.Lfail

.Ldone:
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax

	/* Remove error code */
	addl	$4, %esp

	/* Jump over the 2-byte CPUID instruction */
	addl	$2, (%esp)

	iret
.Lfail:
	/* Send terminate request to Hypervisor */
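	/*
	 * GHCBData[11:0] == 0x100 is the SEV termination request code
	 * per the GHCB MSR protocol; reason-code set and reason code
	 * are both zero.
	 */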
	movl	$0x100, %eax
	xorl	%edx, %edx
	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
	wrmsr
	rep; vmmcall

	/* If request fails, go to hlt loop */
	hlt
	jmp	.Lfail
SYM_CODE_END(startup32_vc_handler)

/*
 * Write an IDT entry into boot32_idt
 *
 * Parameters:
 *
 * %eax:	Handler address
 * %edx:	Vector number
 * %ecx:	IDT address
 */
SYM_FUNC_START_LOCAL(startup32_set_idt_entry)
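	/* Each 32-bit IDT entry (gate descriptor) is 8 bytes */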
	/* IDT entry address to %ecx */
	leal	(%ecx, %edx, 8), %ecx

	/* Build IDT entry, lower 4 bytes */
	movl	%eax, %edx
	andl	$0x0000ffff, %edx		# Target code segment offset [15:0]
	orl	$(__KERNEL32_CS << 16), %edx	# Target code segment selector

	/* Store lower 4 bytes to IDT */
	movl	%edx, (%ecx)

	/* Build IDT entry, upper 4 bytes */
	movl	%eax, %edx
	andl	$0xffff0000, %edx	# Target code segment offset [31:16]
	orl	$0x00008e00, %edx	# Present, Type 32-bit Interrupt Gate

	/* Store upper 4 bytes to IDT */
	movl	%edx, 4(%ecx)

	RET
SYM_FUNC_END(startup32_set_idt_entry)

SYM_FUNC_START(startup32_load_idt)
	push	%ebp
	push	%ebx

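	/*
	 * This code can run from an address other than its link-time
	 * address: use the call/pop trick to find the runtime address
	 * and reference symbols relative to %ebp.
	 */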
	call	1f
1:	pop	%ebp

	leal	(boot32_idt - 1b)(%ebp), %ebx

	/* #VC handler */
	leal	(startup32_vc_handler - 1b)(%ebp), %eax
	movl	$X86_TRAP_VC, %edx
	movl	%ebx, %ecx
	call	startup32_set_idt_entry

	/* Load IDT */
	leal	(boot32_idt_desc - 1b)(%ebp), %ecx
	movl	%ebx, 2(%ecx)		# Set runtime IDT base in the descriptor
	lidt	(%ecx)

	pop	%ebx
	pop	%ebp
	RET
SYM_FUNC_END(startup32_load_idt)

/*
 * Check for the correct C-bit position when the startup_32 boot-path is used.
 *
 * The check makes use of the fact that all memory is encrypted when paging is
 * disabled. The function creates 64 bits of random data using the RDRAND
 * instruction. RDRAND is mandatory for SEV guests, so it is always available.
 * If the hypervisor violates that, the kernel will crash right here.
 *
 * The 64 bits of random data are stored to a memory location and at the same
 * time kept in the %eax and %ebx registers. Since encryption is always active
 * when paging is off, the random data will be stored encrypted in main memory.
 *
 * Then paging is enabled. When the C-bit position is correct, all memory is
 * still mapped encrypted and comparing the register values with memory will
 * succeed. An incorrect C-bit position will map all memory unencrypted, so that
 * the compare will use the encrypted random data and fail.
 */
SYM_FUNC_START(startup32_check_sev_cbit)
	pushl	%ebx
	pushl	%ebp

	call	0f
0:	popl	%ebp

	/* Check for non-zero sev_status */
	movl	(sev_status - 0b)(%ebp), %eax
	testl	%eax, %eax
	jz	4f

	/*
	 * Get two 32-bit random values - retry if RDRAND fails rather
	 * than bailing out, because it is better to prevent forward
	 * progress than to continue without a random value.
	 */
1:	rdrand	%eax
	jnc	1b
2:	rdrand	%ebx
	jnc	2b

	/* Store to memory and keep it in the registers */
	leal	(sev_check_data - 0b)(%ebp), %ebp
	movl	%eax, 0(%ebp)
	movl	%ebx, 4(%ebp)

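	/*
	 * The caller is expected to have already loaded %cr3 with page
	 * tables that map memory with the C-bit under test, so setting
	 * CR0.PG below actually turns translation on.
	 */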
	/* Enable paging to see if encryption is active */
	movl	%cr0, %edx			 /* Backup %cr0 in %edx */
	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
	movl	%ecx, %cr0

	cmpl	%eax, 0(%ebp)
	jne	3f
	cmpl	%ebx, 4(%ebp)
	jne	3f

	movl	%edx, %cr0	/* Restore previous %cr0 */

	jmp	4f

3:	/* Check failed - hlt the machine */
	hlt
	jmp	3b

4:
	popl	%ebp
	popl	%ebx
	RET
SYM_FUNC_END(startup32_check_sev_cbit)

	.code64

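/*
 * sev_verify_cbit() in the include below is 64-bit code shared with
 * arch/x86/kernel, hence the .code64 directive above.
 */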
#include "../../kernel/sev_verify_cbit.S"

	.data

	.balign	8
SYM_DATA(sme_me_mask,		.quad 0)
SYM_DATA(sev_status,		.quad 0)
SYM_DATA(sev_check_data,	.quad 0)

SYM_DATA_START_LOCAL(boot32_idt)
	.rept	32
	.quad	0
	.endr
SYM_DATA_END(boot32_idt)

SYM_DATA_START_LOCAL(boot32_idt_desc)
	.word	. - boot32_idt - 1	# IDT limit
	.long	0			# IDT base, set at runtime by startup32_load_idt
SYM_DATA_END(boot32_idt_desc)