/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 * head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there. This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.code32
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
#include <asm/desc_defs.h>
#include <asm/trapnr.h>
#include "pgtable.h"

/*
 * Fix alignment at 16 bytes. Honoring CONFIG_FUNCTION_ALIGNMENT would result
 * in assembly errors, because the excessive alignment forces .org to move
 * backward.
 */
#undef __ALIGN
#define __ALIGN		.balign	16, 0x90

/*
 * Locally defined symbols should be marked hidden:
 */
	.hidden _bss
	.hidden _ebss
	.hidden _end

	__HEAD

/*
 * This macro gives the relative virtual address of X, i.e. the offset of X
 * from startup_32. This is the same as the link-time virtual address of X,
 * since startup_32 is at 0, but defining it this way tells the
 * assembler/linker that we do not want the actual run-time address of X. This
 * prevents the linker from trying to create unwanted run-time relocation
 * entries for the reference when the compressed kernel is linked as PIE.
 *
 * A reference X(%reg) will result in the link-time VA of X being stored with
 * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that
 * adds the 64-bit base address where the kernel is loaded.
 *
 * Replacing it with (X-startup_32)(%reg) results in the offset being stored,
 * and no run-time relocation.
 *
 * The macro should be used as a displacement with a base register containing
 * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate
 * [$ rva(X)].
 *
 * This macro can only be used from within the .head.text section, since the
 * expression requires startup_32 to be in the same section as the code being
 * assembled.
 */
#define rva(X) ((X) - startup_32)

	.code32
SYM_FUNC_START(startup_32)
	/*
	 * The 32-bit entry point is at offset 0 and is ABI, so it is
	 * immutable! If we come here directly from a bootloader, the
	 * kernel(text+data+bss+brk), ramdisk, zero_page and command line
	 * all need to be under the 4G limit.
	 */
	cld
	cli

	/*
	 * Calculate the delta between where we were compiled to run
	 * at and where we were actually loaded at. This can only be done
	 * with a short local call on x86. Nothing else will tell us what
	 * address we are running at. The reserved chunk of the real-mode
	 * data at 0x1e4 (defined as a scratch field) is used as the stack
	 * for this calculation. Only 4 bytes are needed.
	 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$ rva(1b), %ebp
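	/*
	 * The call above pushed the run-time address of label 1, and
	 * rva(1b) is its link-time offset from startup_32, so %ebp now
	 * holds the run-time address of startup_32 itself.
	 */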

	/*
	 * Load the new GDT with the 64-bit segments using a 32-bit
	 * descriptor. The GDT starts with its own pseudo-descriptor
	 * (limit word, then base), so patch the run-time address of the
	 * GDT into the base field at offset 2 before loading it.
	 */
	leal	rva(gdt)(%ebp), %eax
	movl	%eax, 2(%eax)
	lgdt	(%eax)

	/* Load segment registers with our descriptors */
	movl	$__BOOT_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss

	/* Set up a stack and load CS from the current GDT */
	leal	rva(boot_stack_end)(%ebp), %esp

	pushl	$__KERNEL32_CS
	leal	rva(1f)(%ebp), %eax
	pushl	%eax
	lretl
1:
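	/*
	 * The far return above popped the new __KERNEL32_CS selector into
	 * CS; execution continues here through the freshly loaded GDT.
	 */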

	/* Set up exception handling for SEV-ES */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	call	startup32_load_idt
#endif

	/* Make sure the CPU supports long mode. */
	call	verify_cpu
	testl	%eax, %eax
	jnz	.Lno_longmode

	/*
	 * Compute the delta between where we were compiled to run at
	 * and where the code will actually run at.
	 *
	 * %ebp contains the address we are loaded at by the boot loader and
	 * %ebx contains the address where we should move the kernel image
	 * temporarily for safe in-place decompression.
	 */

#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
	jae	1f
#endif
	movl	$LOAD_PHYSICAL_ADDR, %ebx
1:
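	/*
	 * %ebx now holds the load address rounded up to the requested
	 * alignment: with align = BP_kernel_alignment, the decl/addl/notl/
	 * andl sequence computes (%ebp + align - 1) & ~(align - 1), clamped
	 * to no less than LOAD_PHYSICAL_ADDR.
	 */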

	/* Target address to relocate to for decompression */
	addl	BP_init_size(%esi), %ebx
	subl	$ rva(_end), %ebx
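	/*
	 * %ebx is chosen so that the relocated image (through _end) ends
	 * exactly at the aligned load address plus init_size, i.e. at the
	 * very end of the decompression buffer, where in-place
	 * decompression cannot overwrite it.
	 */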

	/*
	 * Prepare for entering 64-bit mode
	 */

	/* Enable PAE mode */
	movl	%cr4, %eax
	orl	$X86_CR4_PAE, %eax
	movl	%eax, %cr4

	/*
	 * Build early 4G boot pagetable
	 */
	/*
	 * If SEV is active then set the encryption mask in the page tables.
	 * This ensures that when the kernel is copied and decompressed, it
	 * is done so encrypted.
	 */
	xorl	%edx, %edx
#ifdef CONFIG_AMD_MEM_ENCRYPT
	call	get_sev_encryption_bit
	xorl	%edx, %edx
	testl	%eax, %eax
	jz	1f
	subl	$32, %eax	/* Encryption bit is always above bit 31 */
	bts	%eax, %edx	/* Set encryption mask for page tables */
	/*
	 * Set MSR_AMD64_SEV_ENABLED_BIT in sev_status so that
	 * startup32_check_sev_cbit() will do a check. sev_enable() will
	 * initialize sev_status with all the bits reported by the
	 * MSR_AMD64_SEV status MSR later, but only MSR_AMD64_SEV_ENABLED_BIT
	 * needs to be set for now.
	 */
	movl	$1, rva(sev_status)(%ebp)
1:
#endif
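	/*
	 * %edx now holds the encryption mask for the upper 32 bits of each
	 * page table entry (or zero). The addl into the high dword at
	 * 4(%edi) below effectively ORs the mask in, since the high dwords
	 * of these sub-4G entries start out zero.
	 */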

	/* Initialize page tables to 0 */
	leal	rva(pgtable)(%ebx), %edi
	xorl	%eax, %eax
	movl	$(BOOT_INIT_PGT_SIZE/4), %ecx
	rep	stosl

	/* Build Level 4 */
	leal	rva(pgtable + 0)(%ebx), %edi
	leal	0x1007(%edi), %eax	/* level 3 table at +0x1000, flags 0x7 */
	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)

	/* Build Level 3 */
	leal	rva(pgtable + 0x1000)(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	$4, %ecx
1:	movl	%eax, 0x00(%edi)
	addl	%edx, 0x04(%edi)
	addl	$0x00001000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Build Level 2 */
	leal	rva(pgtable + 0x2000)(%ebx), %edi
	movl	$0x00000183, %eax	/* 0x183: present, writable, 2M page (PS), global */
	movl	$2048, %ecx
1:	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)
	addl	$0x00200000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b
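	/*
	 * The identity map now looks like this:
	 *   pgtable + 0x0000: level 4, one entry -> pgtable + 0x1000
	 *   pgtable + 0x1000: level 3, 4 entries -> pgtable + 0x2000..0x5000
	 *   pgtable + 0x2000: 4 level 2 tables, 2048 x 2M entries = 4G
	 */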

	/* Enable the boot page tables */
	leal	rva(pgtable)(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/* With the new GDT loaded, clear the LDT and load the boot TSS */
	xorl	%eax, %eax
	lldt	%ax
	movl	$__BOOT_TSS, %eax
	ltr	%ax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* Check that the C-bit position is correct when SEV is active */
	call	startup32_check_sev_cbit
#endif

	/*
	 * Set up for the jump to 64-bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32-bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64-bit mode we use
	 * the new GDT, which has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can be
	 * used to perform that far jump.
	 */
	leal	rva(startup_64)(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
	cmpb	$1, rva(efi_is64)(%ebp)
	je	1f
	leal	rva(startup_64_mixed_mode)(%ebp), %eax
1:
#endif

	pushl	$__KERNEL_CS
	pushl	%eax

	/* Enter paged protected mode, activating long mode */
	movl	$CR0_STATE, %eax
	movl	%eax, %cr0

	/* Jump from 32-bit compatibility mode into 64-bit mode. */
	lret
SYM_FUNC_END(startup_32)

	.code64
	.org 0x200
SYM_CODE_START(startup_64)
	/*
	 * The 64-bit entry point is at offset 0x200 and is ABI, so it is
	 * immutable! We come here either from startup_32 or directly from
	 * a 64-bit bootloader.
	 * If we come here from a bootloader, the kernel(text+data+bss+brk),
	 * ramdisk, zero_page and command line could be above 4G.
	 * We depend on an identity mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */

	cld
	cli

	/* Set up data segments. */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Compute the decompressed kernel start address. It is where
	 * we were loaded at, aligned to a 2M boundary. %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from the load address aligned to a 2M boundary, otherwise
	 * decompress and run the kernel from LOAD_PHYSICAL_ADDR.
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */

	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
	movl	BP_kernel_alignment(%rsi), %eax
	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp
	cmpq	$LOAD_PHYSICAL_ADDR, %rbp
	jae	1f
#endif
	movq	$LOAD_PHYSICAL_ADDR, %rbp
1:
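	/* As in startup_32: %rbp = (load address + align - 1) & ~(align - 1) */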

	/* Target address to relocate to for decompression */
	movl	BP_init_size(%rsi), %ebx
	subl	$ rva(_end), %ebx
	addq	%rbp, %rbx

	/* Set up the stack */
	leaq	rva(boot_stack_end)(%rbx), %rsp

	/*
	 * At this point we are in long mode with 4-level paging enabled,
	 * but we might want to enable 5-level paging or vice versa.
	 *
	 * The problem is that we cannot do it directly. Setting or clearing
	 * CR4.LA57 in long mode would trigger #GP. So we need to switch off
	 * long mode and paging first.
	 *
	 * We also need a trampoline in lower memory to switch over from
	 * 4- to 5-level paging for cases when the bootloader puts the kernel
	 * above 4G, but didn't enable 5-level paging for us.
	 *
	 * The same trampoline can be used to switch from 5- to 4-level paging
	 * mode, e.g. when starting a 4-level paging kernel via kexec() while
	 * the original kernel worked in 5-level paging mode.
	 *
	 * For the trampoline, we need the top page table to reside in lower
	 * memory as we don't have a way to load 64-bit values into CR3 in
	 * 32-bit mode.
	 */

	/*
	 * Make sure we have a GDT with a 32-bit code segment. gdt64 holds
	 * the link-time offset (gdt - gdt64) in its base field, so adding
	 * the run-time address of gdt64 turns that into the run-time
	 * address of gdt before loading it.
	 */
	leaq	gdt64(%rip), %rax
	addq	%rax, 2(%rax)
	lgdt	(%rax)

	/* Reload CS via a far return so it refers to a CS actually in the GDT */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	/*
	 * RSI holds a pointer to a boot_params structure provided by the
	 * loader, and this needs to be preserved across C function calls. So
	 * move it into a callee saved register.
	 */
	movq	%rsi, %r15

	call	load_stage1_idt

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Now that the stage1 interrupt handlers are set up, #VC exceptions from
	 * CPUID instructions can be properly handled for SEV-ES guests.
	 *
	 * For SEV-SNP, the CPUID table also needs to be set up in advance of any
	 * CPUID instructions being issued, so go ahead and do that now via
	 * sev_enable(), which will also handle the rest of the SEV-related
	 * detection/setup to ensure that has been done in advance of any dependent
	 * code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sev_enable
#endif

	/*
	 * configure_5level_paging() updates the number of paging levels using
	 * a trampoline in 32-bit addressable memory if the current number does
	 * not match the desired number.
	 *
	 * Pass the boot_params pointer as the first argument. The second
	 * argument is the relocated address of the page table to use instead
	 * of the page table in trampoline memory (if required).
	 */
	movq	%r15, %rdi
	leaq	rva(top_pgtable)(%rbx), %rsi
	call	configure_5level_paging

	/* Zero EFLAGS */
	pushq	$0
	popfq

	/*
	 * Copy the compressed kernel to the end of our buffer
	 * where decompression in place becomes safe.
	 */
	leaq	(_bss-8)(%rip), %rsi
	leaq	rva(_bss-8)(%rbx), %rdi
	movl	$(_bss - startup_32), %ecx
	shrl	$3, %ecx
	std
	rep	movsq
	cld
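	/*
	 * The copy runs backwards (direction flag set, starting at the
	 * last quadword _bss-8) because the destination may overlap the
	 * source with the destination at the higher address.
	 */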

	/*
	 * The GDT may get overwritten either during the copy we just did or
	 * during extract_kernel below. To avoid any issues, repoint the GDTR
	 * to the new copy of the GDT.
	 */
	leaq	rva(gdt64)(%rbx), %rax
	leaq	rva(gdt)(%rbx), %rdx
	movq	%rdx, 2(%rax)
	lgdt	(%rax)

	/*
	 * Jump to the relocated address.
	 */
	leaq	rva(.Lrelocated)(%rbx), %rax
	jmp	*%rax
SYM_CODE_END(startup_64)

	.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leaq	_bss(%rip), %rdi
	leaq	_ebss(%rip), %rcx
	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq

	call	load_stage2_idt

	/* Pass boot_params to initialize_identity_maps() */
	movq	%r15, %rdi
	call	initialize_identity_maps

	/*
	 * Do the extraction, and jump to the new kernel.
	 */
	/* Pass the struct boot_params pointer and the output target address */
	movq	%r15, %rdi
	movq	%rbp, %rsi
	call	extract_kernel		/* returns kernel entry point in %rax */

	/*
	 * Jump to the decompressed kernel.
	 */
	movq	%r15, %rsi
	jmp	*%rax
SYM_FUNC_END(.Lrelocated)

/*
 * This is the 32-bit trampoline that will be copied over to low memory. It
 * will be called using the ordinary 64-bit calling convention from code
 * running in 64-bit mode.
 *
 * Return address is at the top of the stack (might be above 4G).
 * The first argument (EDI) contains the address of the temporary PGD level
 * page table in 32-bit addressable memory which will be programmed into
 * register CR3.
 */
	.section ".rodata", "a", @progbits
SYM_CODE_START(trampoline_32bit_src)
	/*
	 * Preserve callee save 64-bit registers on the stack: this is
	 * necessary because the architecture does not guarantee that GPRs will
	 * retain their full 64-bit values across a 32-bit mode switch.
	 */
	pushq	%r15
	pushq	%r14
	pushq	%r13
	pushq	%r12
	pushq	%rbp
	pushq	%rbx

	/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
	movq	%rsp, %rbx
	shrq	$32, %rbx

	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
	pushq	$__KERNEL32_CS
	leaq	0f(%rip), %rax
	pushq	%rax
	lretq

	/*
	 * The 32-bit code below will do a far jump back to long mode and end
	 * up here after reconfiguring the number of paging levels. First, the
	 * stack pointer needs to be restored to its full 64-bit value before
	 * the callee save register contents can be popped from the stack.
	 */
.Lret:
	shlq	$32, %rbx
	orq	%rbx, %rsp

	/* Restore the preserved 64-bit registers */
	popq	%rbx
	popq	%rbp
	popq	%r12
	popq	%r13
	popq	%r14
	popq	%r15
	retq

	.code32
0:
	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	/* Point CR3 to the trampoline's new top level page table */
	movl	%edi, %cr3

	/* Set EFER.LME = 1 as a precaution in case the hypervisor pulls the rug */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	/* Avoid writing EFER if no change was made (for TDX guest) */
	jc	1f
	wrmsr
1:
	/* Toggle CR4.LA57 */
	movl	%cr4, %eax
	btcl	$X86_CR4_LA57_BIT, %eax
	movl	%eax, %cr4

	/* Enable paging again. */
	movl	%cr0, %eax
	btsl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	/*
	 * Return to the 64-bit calling code using LJMP rather than LRET, to
	 * avoid the need for a 32-bit addressable stack. The destination
	 * address will be adjusted after the template code is copied into a
	 * 32-bit addressable buffer.
	 */
.Ljmp:	ljmpl	$__KERNEL_CS, $(.Lret - trampoline_32bit_src)
SYM_CODE_END(trampoline_32bit_src)

/*
 * This symbol is placed right after trampoline_32bit_src() so its address can
 * be used to infer the size of the trampoline code.
 */
SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
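/*
 * The ljmpl at .Ljmp encodes as the 0xEA opcode followed immediately by the
 * 4-byte destination offset, so .Ljmp + 1 is the location of the offset
 * field that gets patched with the trampoline's run-time address.
 */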

	/*
	 * The trampoline code has a size limit.
	 * Make sure we fail to assemble if the trampoline code grows
	 * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
	 */
	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE

	.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
	/* This isn't an x86-64 CPU, so hang intentionally; we cannot continue */
1:
	hlt
	jmp	1b
SYM_FUNC_END(.Lno_longmode)

	.globl	verify_cpu
#include "../../kernel/verify_cpu.S"

	.data
SYM_DATA_START_LOCAL(gdt64)
	.word	gdt_end - gdt - 1
	.quad	gdt - gdt64		/* link-time offset, fixed up at run time */
SYM_DATA_END(gdt64)
	.balign	8
SYM_DATA_START_LOCAL(gdt)
	.word	gdt_end - gdt - 1	/* the GDT doubles as its own pseudo-descriptor */
	.long	0			/* 32-bit base, patched before lgdt */
	.word	0
	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
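/*
 * 0x00cf9a...: flat 4G 32-bit code (D = 1), 0x00af9a...: 64-bit code
 * (L = 1), 0x00cf92...: flat 4G read/write data. Base and limit are
 * ignored in 64-bit mode but matter for the 32-bit segments.
 */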

SYM_DATA_START(boot_idt_desc)
	.word	boot_idt_end - boot_idt - 1
	.quad	0
SYM_DATA_END(boot_idt_desc)
	.balign 8
SYM_DATA_START(boot_idt)
	.rept	BOOT_IDT_ENTRIES
	.quad	0
	.quad	0
	.endr
SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)

/*
 * Stack and heap for decompression
 */
	.bss
	.balign 4
SYM_DATA_START_LOCAL(boot_stack)
	.fill BOOT_STACK_SIZE, 1, 0
	.balign 16
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)

/*
 * Space for page tables (not in .bss so not zeroed)
 */
	.section ".pgtable", "aw", @nobits
	.balign 4096
SYM_DATA_LOCAL(pgtable, .fill BOOT_PGT_SIZE, 1, 0)

/*
 * This page table is going to be used instead of the page table in the
 * trampoline memory.
 */
SYM_DATA_LOCAL(top_pgtable, .fill PAGE_SIZE, 1, 0)