/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>
#include <asm/thread_info.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64-bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity-mapped page table
	 * for us. These identity-mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %RSI holds the physical address of the boot_params structure
	 * provided by the bootloader. Preserve it in %R15 so C function calls
	 * will not clobber it.
	 *
	 * We come here either directly from a 64-bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	mov	%rsi, %r15

	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE)(%rip), %rsp

	/* Set up GSBASE to allow stack canary access for C code */
	movl	$MSR_GS_BASE, %ecx
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
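	/*
	 * WRMSR takes the MSR index in %ecx and the 64-bit value in
	 * %edx:%eax, so split the address into its low and high halves.
	 */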
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	call	startup_64_setup_gdt_idt

	/* Now switch to __KERNEL_CS so IRET works reliably */
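	/*
	 * %cs cannot be written with a MOV: push the new selector and the
	 * target address, then pop both with a far return.
	 */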
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_END_OF_STACK

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sme_enable
#endif

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	movq	%r15, %rsi
	call	__startup_64

	/* Form the CR3 value, being sure to include the CR3 modifier */
	leaq	early_top_pgt(%rip), %rcx
	addq	%rcx, %rax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	mov	%rax, %rdi

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 */
	call	sev_verify_cbit
#endif

	/*
	 * Switch to early_top_pgt which still has the identity mappings
	 * present.
	 */
	movq	%rax, %cr3

	/* Branch to the common startup code at its kernel virtual address */
	ANNOTATE_RETPOLINE_SAFE
	jmp	*0f(%rip)
SYM_CODE_END(startup_64)

	__INITRODATA
0:	.quad	common_startup_64
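	/*
	 * The absolute target is kept in .init.rodata: a direct JMP above
	 * would be RIP-relative and land in the current (physical) alias of
	 * the code, while the indirect jump through this 64-bit literal
	 * reaches common_startup_64 at its kernel virtual address.
	 */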

	.text
SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64-bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity-mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/* Clear %R15 which holds the boot_params pointer on the boot CPU */
	xorl	%r15d, %r15d

	/* Derive the runtime physical address of init_top_pgt[] */
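	/*
	 * (init_top_pgt - __START_KERNEL_map) is the physical address
	 * init_top_pgt was linked at; phys_base holds the delta between
	 * the runtime and link-time load addresses, so their sum is the
	 * actual physical address of the table.
	 */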
	movq	phys_base(%rip), %rax
	addq	$(init_top_pgt - __START_KERNEL_map), %rax

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	addq	sme_me_mask(%rip), %rax
#endif
	/*
	 * Switch to the init_top_pgt here, away from the trampoline_pgd and
	 * unmap the identity-mapped ranges.
	 */
	movq	%rax, %cr3

SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/*
	 * Create a mask of CR4 bits to preserve. Omit PGE in order to flush
	 * global 1:1 translations from the TLBs.
	 *
	 * From the SDM:
	 * "If CR4.PGE is changing from 0 to 1, there were no global TLB
	 *  entries before the execution; if CR4.PGE is changing from 1 to 0,
	 *  there will be no global TLB entries after the execution."
	 */
	movl	$(X86_CR4_PAE | X86_CR4_LA57), %edx
#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	orl	$X86_CR4_MCE, %edx
#endif
	movq	%cr4, %rcx
	andl	%edx, %ecx

	/* Even if ignored in long mode, set PSE uniformly on all logical CPUs. */
	btsl	$X86_CR4_PSE_BIT, %ecx
	movq	%rcx, %cr4

	/*
	 * Set CR4.PGE to re-enable global translations.
	 */
	btsl	$X86_CR4_PGE_BIT, %ecx
	movq	%rcx, %cr4

#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
	 * used to look up the CPU number. For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (Read APICID from APIC)
	 * Bits 0-23	CPU# if STARTUP_xx flags are not set
	 */
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid
	/*
	 * No control bit set, single CPU bringup. The CPU number is provided
	 * in bits 0-23. This is also the boot CPU case (CPU number 0).
	 */
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr

#ifdef CONFIG_X86_X2APIC
	/*
	 * If the system is in X2APIC mode, the MMIO base might not be
	 * mapped, causing the MMIO read below to fault. Faults can't be
	 * handled at that point.
	 */
	cmpl	$0, x2apic_mode(%rip)
	jz	.Lread_apicid_mmio

	/* Force the AP into X2APIC mode. */
	orl	$X2APIC_ENABLE, %eax
	wrmsr
	jmp	.Lread_apicid_msr
#endif

.Lread_apicid_mmio:
	/* Read the APIC ID from the fix-mapped MMIO space. */
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
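	/* In xAPIC mode the 8-bit APIC ID sits in bits 31:24 of the register */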
	shr	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
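	/* In x2APIC mode RDMSR returns the full 32-bit APIC ID in %eax */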
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr

.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorl	%ecx, %ecx
	leaq	cpuid_to_apicid(%rip), %rbx
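	/*
	 * cpuid_to_apicid[] is an array of 32-bit APIC IDs indexed by CPU
	 * number, hence the *4 scaling in the lookup below.
	 */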

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	cmpl	$NR_CPUS, %ecx
#else
	cmpl	nr_cpu_ids(%rip), %ecx
#endif
	jb	.Lfind_cpunr

	/* APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b

.Lsetup_cpu:
	/* Get the per-CPU offset for the given CPU#, which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
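	/* __per_cpu_offset[] holds one 8-byte offset per possible CPU */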
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Set up a boot-time stack. Any secondary CPU will have lost its
	 * stack by now because the CR3 switch above unmaps the real-mode
	 * stack.
	 *
	 * RDX contains the per-CPU offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running at. We have to do that here
	 * because in 32-bit mode we couldn't load a 64-bit linear address.
	 */
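	/*
	 * Build the 10-byte pseudo-descriptor LGDT expects on the stack:
	 * a 16-bit limit followed by a 64-bit linear base address.
	 */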
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
#ifndef CONFIG_SMP
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
#endif
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	/* Set up and load the IDT */
	call	early_setup_idt

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Set up EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison, to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* CPUID 0x80000001 EDX bit 20: No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
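	/* Mark PMDs created later by the early paging code NX as well */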

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Set up CR0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Zero EFLAGS after setting RSP */
	pushq	$0
	popfq

	/* Pass the boot_params pointer as first argument */
	movq	%r15, %rdi

.Ljump_to_C_code:
	xorl	%ebp, %ebp	# clear frame pointer
	ANNOTATE_RETPOLINE_SAFE
	callq	*initial_code(%rip)
	ud2
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
 * unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot + X86_current_task), %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0);
	__FINITDATA

	__INIT
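/*
 * Each of the NUM_EXCEPTION_VECTORS stubs below occupies
 * EARLY_IDT_HANDLER_SIZE bytes. Vectors for which the CPU does not push
 * an error code push a dummy 0 first so the stack layout is uniform for
 * all vectors; the padding bytes are 0xcc (INT3).
 */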
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq	%rsp,%rdi	/* RDI = pt_regs; RSI is already trapnr */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned. We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1:1 mapping PMD entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
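
/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 * below emits 512 consecutive 2M (1 << PMD_SHIFT) entries covering the
 * first 1G of physical memory.
 */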

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
	 * supports it or whether it is enabled. If the CPU does not,
	 * it should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2 MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(smpboot_control,		.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)