/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */
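
/*
 * A minimal illustration of the rule above (an editorial sketch with
 * made-up symbol names, not part of the real script):
 *
 *	bad_sym = ABSOLUTE(0x1000);	absolute: never relocated
 *
 *	.example : AT(ADDR(.example) - LOAD_OFFSET) {
 *		good_sym = .;		section-relative: moves with the image
 *	}
 */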

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386	/* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

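/*
 * Two linker-level aliases, glossed here for clarity (an editorial note,
 * assuming the usual C-side declarations): jiffies overlays the low word
 * of jiffies_64, which is correct on this little-endian architecture; and
 * const_pcpu_hot aliases pcpu_hot under a const-qualified declaration
 * (see asm/current.h) so the compiler may cache reads of fields such as
 * current_task.
 */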
jiffies = jiffies_64;
const_pcpu_hot = pcpu_hot;

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping text and for the padding pages (which are freed)
 * at the end of the text section, so the identity mappings there will be
 * broken into smaller pages. For 64-bit, kernel text and kernel identity
 * mappings are different, so we can enable protection checks as well as
 * retain 2MB large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END			\
		. = ALIGN(HPAGE_SIZE);		\
		__end_rodata_hpage_align = .;	\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED						\
	. = ALIGN(PMD_SIZE);					\
	__start_bss_decrypted = .;				\
	*(.bss..decrypted);					\
	. = ALIGN(PAGE_SIZE);					\
	__start_bss_decrypted_unused = .;			\
	. = ALIGN(PMD_SIZE);					\
	__end_bss_decrypted = .;

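/*
 * Usage sketch (hedged): C code is expected to place objects in this
 * section through a section attribute along these lines; the marker name
 * matches the kernel's __bss_decrypted, but treat the exact definition
 * and the example object as illustrative:
 *
 *	#define __bss_decrypted __section(".bss..decrypted")
 *
 *	static struct pvclock_vsyscall_time_info
 *		hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted;
 */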
#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END			\
		. = ALIGN(PAGE_SIZE);		\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);	/* RW_ */
#endif
	init PT_LOAD FLAGS(7);		/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}
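
/*
 * The FLAGS() values above are the standard ELF p_flags bits:
 * PF_X = 1, PF_W = 2, PF_R = 4. Hence 5 = read + execute, 6 = read +
 * write, and 7 = read + write + execute (only the init segment, which
 * is freed after boot, is mapped RWX).
 */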

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		SOFTIRQENTRY_TEXT
#ifdef CONFIG_MITIGATION_RETPOLINE
		*(.text..__x86.indirect_thunk)
		*(.text..__x86.return_thunk)
#endif
		STATIC_CALL_TEXT

		ALIGN_ENTRY_TEXT_BEGIN
		*(.text..__x86.rethunk_untrain)
		ENTRY_TEXT

#ifdef CONFIG_MITIGATION_SRSO
		/*
		 * See the comment above srso_alias_untrain_ret()'s
		 * definition.
		 */
		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
		*(.text..__x86.rethunk_safe)
#endif
		ALIGN_ENTRY_TEXT_END
		*(.gnu.warning)

	} :text = 0xcccccccc
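
	/*
	 * The 0xcc fill byte above is the x86 INT3 opcode, so any padding
	 * between input sections traps instead of executing as stray code.
	 */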

	/* End of text section, which should occupy a whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#include <asm/vvar.h>
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
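
	/*
	 * For reference, a hedged sketch of what asm/vvar.h is expected to
	 * expand to here (not verified in this file): with the linker-script
	 * definition of EMIT_VVAR above, each DECLARE_VVAR(offset, type, name)
	 * entry becomes EMIT_VVAR(name, offset), e.g.
	 *
	 *	DECLARE_VVAR(128, struct vdso_data, _vdso_data)
	 *
	 * which pins each vvar at a fixed offset within __vvar_page.
	 */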

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}
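
	/*
	 * Illustrative static_cpu_has() call site (a sketch; see
	 * asm/cpufeature.h for the real implementation):
	 *
	 *	if (static_cpu_has(X86_FEATURE_XSAVE))
	 *		fast_path();
	 *
	 * Before alternatives run, the branch jumps into .altinstr_aux,
	 * which tests the feature bit the slow way; patching then rewrites
	 * the branch so the .altinstr_aux code is never reached again.
	 */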

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

#ifdef CONFIG_MITIGATION_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*(). These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}

	. = ALIGN(8);
	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
		__return_sites = .;
		*(.return_sites)
		__return_sites_end = .;
	}

	. = ALIGN(8);
	.call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
		__call_sites = .;
		*(.call_sites)
		__call_sites_end = .;
	}
#endif

#ifdef CONFIG_X86_KERNEL_IBT
	. = ALIGN(8);
	.ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
		__ibt_endbr_seal = .;
		*(.ibt_endbr_seal)
		__ibt_endbr_seal_end = .;
	}
#endif

#ifdef CONFIG_FINEIBT
	. = ALIGN(8);
	.cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
		__cfi_sites = .;
		*(.cfi_sites)
		__cfi_sites_end = .;
	}
#endif

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
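
	/*
	 * A hedged example of what generates these entries (see
	 * asm/alternative.h for the authoritative macros): an inline-asm
	 * ALTERNATIVE() use such as
	 *
	 *	asm volatile (ALTERNATIVE("rdtsc", "rdtscp", X86_FEATURE_RDTSCP));
	 *
	 * records a struct alt_instr here, pointing at the original
	 * instruction and at its replacement bytes below.
	 */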

	/*
	 * And here are the replacement instructions. The linker emits them
	 * as opaque binary blobs; .altinstructions carries the addresses and
	 * lengths needed to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}
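
	/*
	 * .apicdrivers is populated from C via the apic_driver() /
	 * apic_drivers() macros (see asm/apic.h), which drop a pointer to
	 * each struct apic into this section, e.g. (illustrative):
	 *
	 *	apic_driver(apic_physflat);
	 *
	 * The probe code then walks the [__apicdrivers, __apicdrivers_end)
	 * array in link order.
	 */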

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
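
	/*
	 * Background (a hedged summary): .smp_locks holds pointers to the
	 * LOCK prefixes of SMP-safe instructions. The alternatives code
	 * (see alternative.c) patches those prefixes out while only one CPU
	 * is online and restores them before a second CPU comes up; once
	 * the system commits to SMP, the section can be freed.
	 */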

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.bss..brk)		/* areas brk users have reserved */
		__brk_limit = .;
	}
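
	/*
	 * Usage sketch (hedged; see asm/setup.h for the real interface):
	 * early boot code reserves brk space at compile time with
	 *
	 *	RESERVE_BRK(name, size);
	 *
	 * which lands in .bss..brk above, and then carves allocations out of
	 * the region at run time via extend_brk(size, align) before the
	 * memory allocators are up.
	 */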

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since this scratch area is located after
	 * __end_of_kernel_reserve and is never separately reserved, it will
	 * be discarded and become part of the available memory. As such, it
	 * can only be used by very early boot code and must not be needed
	 * afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized, which is safer to check
	 * explicitly than to discard blindly.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
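
/*
 * These init_per_cpu__* aliases give early boot code a directly
 * addressable copy of each symbol inside the initial per-CPU image at
 * __per_cpu_load, before the per-CPU areas are set up. A hedged sketch
 * of a consumer, modelled on the GDT load in head_64.S:
 *
 *	early_gdt_descr_base:
 *		.quad	init_per_cpu__gdt_page
 */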

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
	   "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif

#ifdef CONFIG_MITIGATION_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR
 * of the two function addresses. This equals A ^ B because every bit set
 * in (A & B) is also set in (A | B), so the subtraction clears exactly
 * the common bits and never borrows:
 */
. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
	    (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
	   "SRSO function pair won't alias");
#endif

#endif /* CONFIG_X86_64 */