// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/asm.h>
#include <asm/page_64.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/init.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
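
/*
 * _PAGE_GLOBAL is cleared so these early mappings are fully flushed once
 * the final page tables take over. _PAGE_NX is cleared because the NX bit
 * may not be usable yet this early (e.g. EFER.NXE not enabled, or NX
 * unsupported), in which case a set NX bit would be a reserved-bit fault.
 */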

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif
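
/*
 * For reference (see Documentation/arch/x86/x86_64/mm.rst): with 4-level
 * paging these default to 0xffff888000000000 (direct map),
 * 0xffffc90000000000 (vmalloc) and 0xffffea0000000000 (vmemmap); the _L5
 * variants relocate them into the wider 57-bit address space.
 */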

static inline bool check_la57_support(void)
{
	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return false;

	/*
	 * 5-level paging is detected and enabled at kernel decompression
	 * stage. Only check if it has been enabled there.
	 */
	if (!(native_read_cr4() & X86_CR4_LA57))
		return false;

	RIP_REL_REF(__pgtable_l5_enabled) = 1;
	RIP_REL_REF(pgdir_shift) = 48;
	RIP_REL_REF(ptrs_per_p4d) = 512;
	RIP_REL_REF(page_offset_base) = __PAGE_OFFSET_BASE_L5;
	RIP_REL_REF(vmalloc_base) = __VMALLOC_BASE_L5;
	RIP_REL_REF(vmemmap_base) = __VMEMMAP_BASE_L5;

	return true;
}
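
/*
 * Illustrative sketch only (demo helper, not used by the kernel): with
 * 4-level paging a PGD entry covers 512 GiB (shift 39); LA57 inserts the
 * P4D level so a PGD entry covers 256 TiB (shift 48):
 *
 *	static unsigned int demo_pgd_index(unsigned long vaddr, bool la57)
 *	{
 *		return (vaddr >> (la57 ? 48 : 39)) & (PTRS_PER_PGD - 1);
 *	}
 */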

static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
{
	unsigned long vaddr, vaddr_end;
	int i;

	/* Encrypt the kernel and related (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Clear the memory encryption mask from the .bss..decrypted section.
	 * The bss section will be memset to zero later in the initialization so
	 * there is no need to zero it after changing the memory encryption
	 * attribute.
	 */
	if (sme_get_me_mask()) {
		vaddr = (unsigned long)__start_bss_decrypted;
		vaddr_end = (unsigned long)__end_bss_decrypted;

		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
			/*
			 * On SNP, transition the page to shared in the RMP table so that
			 * it is consistent with the page table attribute change.
			 *
			 * __start_bss_decrypted has a virtual address in the high range
			 * mapping (kernel .text). PVALIDATE, by way of
			 * early_snp_set_memory_shared(), requires a valid virtual
			 * address but the kernel is currently running off of the identity
			 * mapping so use __pa() to get a *currently* valid virtual address.
			 */
			early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);

			i = pmd_index(vaddr);
			pmd[i] -= sme_get_me_mask();
		}
	}

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}
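
/*
 * Illustrative only: the assembly caller in head_64.S folds the returned
 * mask into the initial CR3 value, roughly equivalent to this C rendering:
 *
 *	cr3 = __pa(early_top_pgt) + sme_postprocess_startup(bp, pmd);
 *	write_cr3(cr3);
 */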

/*
 * Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be accessed using RIP_REL_REF().
 */
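/*
 * Example: a plain "next_early_pgt = 2;" may be emitted as an absolute
 * access to the link-time address and fault when the code runs relocated;
 * "RIP_REL_REF(next_early_pgt) = 2;" (as used below) forces a RIP-relative
 * access that works at any load address.
 */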
unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts);
	unsigned long pgtable_flags;
	unsigned long load_delta;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	bool la57;
	int i;

	la57 = check_la57_support();

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
	RIP_REL_REF(phys_base) = load_delta;
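
	/*
	 * Worked example: with the default CONFIG_PHYSICAL_START of
	 * 0x1000000, (_text - __START_KERNEL_map) == 0x1000000; if the
	 * image was actually loaded at 0x5000000, load_delta == 0x4000000
	 * and every physical address baked into the static page tables
	 * must be shifted up by that amount.
	 */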

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_MASK)
		for (;;);

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */

	pgd = &RIP_REL_REF(early_top_pgt)->pgd;
	pgd[pgd_index(__START_KERNEL_map)] += load_delta;

	if (la57) {
		p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt);
		p4d[MAX_PTRS_PER_P4D - 1] += load_delta;

		pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
	}

	RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta;
	RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta;

	for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
		RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta;

	/*
	 * Set up the identity mapping for the switchover. These
	 * entries should *NOT* have the global bit set! This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	pud = &early_pgts[0]->pmd;
	pmd = &early_pgts[1]->pmd;
	RIP_REL_REF(next_early_pgt) = 2;

	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

	if (la57) {
		p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd;

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

		i = physaddr >> P4D_SHIFT;
		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
	}

	i = physaddr >> PUD_SHIFT;
	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	pmd_entry &= RIP_REL_REF(__supported_pte_mask);
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;

	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT);

		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
	}
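
	/*
	 * Worked example for the loop above: with physaddr == 0x1000000
	 * (16 MiB), the first slot is index 8 (0x1000000 >> PMD_SHIFT), so
	 * pmd[8] maps 16..18 MiB, pmd[9] maps 18..20 MiB, and so on for
	 * the size of the image, wrapping modulo PTRS_PER_PMD.
	 */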

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 *
	 * Only the region occupied by the kernel image has so far
	 * been checked against the table of usable memory regions
	 * provided by the firmware, so invalidate pages outside that
	 * region. A page table entry that maps to a reserved area of
	 * memory would allow processor speculation into that area,
	 * and on some hardware (particularly the UV platform) even
	 * speculative access to some reserved areas is caught as an
	 * error, causing the BIOS to halt the system.
	 */

	pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd;

	/* invalidate pages before the kernel image */
	for (i = 0; i < pmd_index((unsigned long)_text); i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/* fixup pages that are part of the kernel image */
	for (; i <= pmd_index((unsigned long)_end); i++)
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;

	/* invalidate pages after the kernel image */
	for (; i < PTRS_PER_PMD; i++)
		pmd[i] &= ~_PAGE_PRESENT;

	return sme_postprocess_startup(bp, pmd);
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}
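
/*
 * Worked example for the memset bound above: with 4-level paging,
 * pgd_index(__START_KERNEL_map) == (0xffffffff80000000 >> 39) & 511 == 511,
 * so clearing PTRS_PER_PGD - 1 entries wipes slots 0..510 and leaves
 * exactly the kernel's high mapping in the last slot.
 */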

/* Create a new PMD entry */
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Invalid address or early pgt is done? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return false;

again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever. Adding
	 * __START_KERNEL_map - phys_base converts the physical address
	 * stored in the entry into a virtual address in the kernel's
	 * already-mapped high range.
	 */
	if (!pgtable_l5_enabled())
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd_p[pmd_index(address)] = pmd;

	return true;
}

static bool __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;

	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}
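
/*
 * Worked example: a fault at __PAGE_OFFSET + 0x12345678 gives physaddr ==
 * 0x12345678; masking with PMD_MASK yields 0x12200000, so the new entry
 * maps the whole 2 MiB frame at physical 0x12200000 with early_pmd_flags.
 */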

void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
	if (trapnr == X86_TRAP_PF &&
	    early_make_pgtable(native_read_cr2()))
		return;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
	    trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
		return;

	if (trapnr == X86_TRAP_VE && tdx_early_handle_ve(regs))
		return;

	early_fixup_exception(regs, trapnr);
}

/*
 * Don't add a printk in here. printk relies on the PDA which is not
 * initialized yet.
 */
void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
	memset(__brk_base, 0,
	       (unsigned long) __brk_limit - (unsigned long) __brk_base);
}

static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}
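
/*
 * Worked example: hdr.cmd_line_ptr == 0x90000 with ext_cmd_line_ptr == 0x1
 * combines to 0x100090000 -- the boot protocol splits command-line
 * addresses above 4 GiB across these two fields.
 */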

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init __noreturn x86_64_start_kernel(char *real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (These are purely build-time and produce no code.)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	/*
	 * This needs to happen *before* kasan_early_init() because the
	 * latter maps stuff into that page.
	 */
	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	/*
	 * Flush global TLB entries which could be left over from the trampoline page
	 * table.
	 *
	 * This needs to happen *after* kasan_early_init() as KASAN-enabled .configs
	 * instrument native_write_cr4() so KASAN must be initialized for that
	 * instrumentation to work.
	 */
	__native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));

	idt_setup_early_handler();

	/* Needed before cc_platform_has() can be used for TDX */
	tdx_early_init();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* Set the init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init __noreturn x86_64_start_reservations(char *real_mode_data)
{
	/* version is always non-zero if the boot data has been copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * this happens in the functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and can be instrumented by tracing or KASAN, which both don't work
 * during early CPU bringup. Also the idt_table has the runtime vectors
 * configured which require certain CPU state to be setup already (like TSS),
 * which also hasn't happened yet in early CPU bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

/* This may run while still in the direct mapping */
static void __head startup_64_load_idt(void *vc_handler)
{
	struct desc_ptr desc = {
		.address = (unsigned long)&RIP_REL_REF(bringup_idt_table),
		.size = sizeof(bringup_idt_table) - 1,
	};
	struct idt_data data;
	gate_desc idt_desc;

	/* @vc_handler is set only for a VMM Communication Exception */
	if (vc_handler) {
		init_idt_data(&data, X86_TRAP_VC, vc_handler);
		idt_init_desc(&idt_desc, &data);
		native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
	}

	native_load_idt(&desc);
}

/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
	void *handler = NULL;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		setup_ghcb();
		handler = vc_boot_ghcb;
	}

	startup_64_load_idt(handler);
}

/*
 * Setup boot CPU state needed before kernel switches to virtual addresses.
 */
void __head startup_64_setup_gdt_idt(void)
{
	void *handler = NULL;

	struct desc_ptr startup_gdt_descr = {
		.address = (unsigned long)&RIP_REL_REF(init_per_cpu_var(gdt_page.gdt)),
		.size = GDT_SIZE - 1,
	};

	/* Load GDT */
	native_load_gdt(&startup_gdt_descr);

	/* New GDT is live - reload data segment registers */
	asm volatile("movl %%eax, %%ds\n"
		     "movl %%eax, %%ss\n"
		     "movl %%eax, %%es\n" : : "a" (__KERNEL_DS) : "memory");

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
		handler = &RIP_REL_REF(vc_no_ghcb);

	startup_64_load_idt(handler);
}