// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the setup_arch() code, which handles the
 * architecture-dependent parts of early kernel initialization.
 */
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/ima.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/initrd.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
#include <linux/tboot.h>
#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <linux/swiotlb.h>
#include <linux/random.h>

#include <uapi/linux/mount.h>

#include <xen/xen.h>

#include <asm/apic.h>
#include <asm/numa.h>
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cacheinfo.h>
#include <asm/coco.h>
#include <asm/cpu.h>
#include <asm/efi.h>
#include <asm/gart.h>
#include <asm/hypervisor.h>
#include <asm/io_apic.h>
#include <asm/kasan.h>
#include <asm/kaslr.h>
#include <asm/mce.h>
#include <asm/memtype.h>
#include <asm/mtrr.h>
#include <asm/realmode.h>
#include <asm/olpc_ofw.h>
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>

/*
 * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
 * max_pfn_mapped:     highest directly mapped pfn > 4 GB
 *
 * The direct mapping only covers E820_TYPE_RAM regions, so the ranges
 * and gaps are represented by pfn_mapped[].
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif


unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

struct boot_params boot_params;

/*
 * These are the four main kernel memory regions; we put them into
 * the resource tree so that kdump tools and other debugging tools
 * can recover them:
 */

static struct resource rodata_resource = {
	.name	= "Kernel rodata",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};


#ifdef CONFIG_X86_32
/* CPU data as detected by the assembly code in head_32.S */
struct cpuinfo_x86 new_cpu_data;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#endif

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
#else
__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
#endif

#ifdef CONFIG_IMA
static phys_addr_t ima_kexec_buffer_phys;
static size_t ima_kexec_buffer_size;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000
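/*
 * The masks above decode the legacy boot_params.hdr.ram_size word of the
 * x86 boot protocol; setup_arch() uses RAMDISK_IMAGE_START_MASK below
 * when CONFIG_BLK_DEV_RAM is enabled.
 */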
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif

void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}
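/*
 * Illustrative use of the brk-like allocator above (hypothetical caller,
 * not taken from this file): pair a build-time reservation with an
 * early-boot allocation, roughly:
 *
 *	RESERVE_BRK(early_buf, PAGE_SIZE);
 *	...
 *	void *buf = extend_brk(PAGE_SIZE, PAGE_SIZE);
 *
 * This only works until reserve_brk() below clears _brk_start.
 */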
#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
}

#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	if (ramdisk_image == 0)
		ramdisk_image = phys_initrd_start;

	return ramdisk_image;
}
static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	if (ramdisk_size == 0)
		ramdisk_size = phys_initrd_size;

	return ramdisk_size;
}
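/*
 * The boot protocol (2.12+) splits the initrd address and size into low
 * (hdr.ramdisk_image/hdr.ramdisk_size) and high
 * (ext_ramdisk_image/ext_ramdisk_size) halves; the two helpers above
 * reassemble the 64-bit values and fall back to phys_initrd_* when the
 * header fields are zero.
 */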
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 area_size = PAGE_ALIGN(ramdisk_size);

	/* We need to move the initrd down into directly mapped mem */
	u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
							  PFN_PHYS(max_pfn_mapped));
	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}

static void __init early_reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}

static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
	       ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_phys_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init add_early_ima_buffer(u64 phys_addr)
{
#ifdef CONFIG_IMA
	struct ima_setup_data *data;

	data = early_memremap(phys_addr + sizeof(struct setup_data), sizeof(*data));
	if (!data) {
		pr_warn("setup: failed to memremap ima_setup_data entry\n");
		return;
	}

	if (data->size) {
		memblock_reserve(data->addr, data->size);
		ima_kexec_buffer_phys = data->addr;
		ima_kexec_buffer_size = data->size;
	}

	early_memunmap(data, sizeof(*data));
#else
	pr_warn("Passed IMA kexec data, but CONFIG_IMA not set. Ignoring.\n");
#endif
}

#if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
int __init ima_free_kexec_buffer(void)
{
	if (!ima_kexec_buffer_size)
		return -ENOENT;

	memblock_free_late(ima_kexec_buffer_phys,
			   ima_kexec_buffer_size);

	ima_kexec_buffer_phys = 0;
	ima_kexec_buffer_size = 0;

	return 0;
}

int __init ima_get_kexec_buffer(void **addr, size_t *size)
{
	if (!ima_kexec_buffer_size)
		return -ENOENT;

	*addr = __va(ima_kexec_buffer_phys);
	*size = ima_kexec_buffer_size;

	return 0;
}
#endif
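/*
 * boot_params.hdr.setup_data is the head of a singly linked list of
 * struct setup_data entries chained via physical ->next pointers.
 * parse_setup_data() below maps each node just long enough to dispatch
 * on its type.
 */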
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_memunmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			e820__memory_setup_extended(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		case SETUP_IMA:
			add_early_ima_buffer(pa_data);
			break;
		case SETUP_RNG_SEED:
			data = early_memremap(pa_data, data_len);
			add_bootloader_randomness(data->data, data->len);
			/* Zero seed for forward secrecy. */
			memzero_explicit(data->data, data->len);
			/* Zero length in case we find ourselves back here by accident. */
			memzero_explicit(&data->len, sizeof(data->len));
			early_memunmap(data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}
static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 pa_data, pa_next;
	u32 len;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		if (!data) {
			pr_warn("setup: failed to memremap setup_data entry\n");
			return;
		}

		len = sizeof(*data);
		pa_next = data->next;

		memblock_reserve(pa_data, sizeof(*data) + data->len);

		if (data->type == SETUP_INDIRECT) {
			len += data->len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap(pa_data, len);
			if (!data) {
				pr_warn("setup: failed to memremap indirect setup_data\n");
				return;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT)
				memblock_reserve(indirect->addr, indirect->len);
		}

		pa_data = pa_next;
		early_memunmap(data, len);
	}
}
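/*
 * A SETUP_INDIRECT entry embeds a struct setup_indirect payload whose
 * addr/len describe an out-of-line buffer; the extra memblock_reserve()
 * above keeps that buffer from being handed out as free memory.
 */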
static void __init arch_reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size, low_size = 0;
	char *cmdline = boot_command_line;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base,
				&low_size, &high);
	if (ret)
		return;

	if (xen_pv_domain()) {
		pr_info("Ignoring crashkernel for a Xen PV domain\n");
		return;
	}

	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
}
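/*
 * Example command lines handled by the parse above (see
 * Documentation/admin-guide/kernel-parameters.txt for the full syntax):
 *
 *	crashkernel=512M
 *	crashkernel=1G,high crashkernel=256M,low
 */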
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
}
static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}

/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * SandyBridge integrated graphics devices have a bug that prevents
	 * them from accessing certain memory ranges, namely anything below
	 * 1M and in the pages listed in bad_pages[] above.
	 *
	 * To avoid these pages being ever accessed by SNB gfx devices reserve
	 * bad_pages that have not already been reserved at boot time.
	 * All memory below the 1 MB mark is anyway reserved later during
	 * setup_arch(), so there is no need to reserve it here.
	 */

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}
static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4 KB of memory: this is a BIOS-owned
	 * area, not kernel RAM, but generally not listed as such in the
	 * E820 table.
	 *
	 * This typically reserves additional memory (64 KiB by default)
	 * since some BIOSes are known to corrupt low memory. See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);

	/*
	 * Special case: some BIOSes report the PC BIOS
	 * area (640 KB -> 1 MB) as RAM even though it is not.
	 * Take them out.
	 */
	e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);

	e820__update_table(e820_table);
}

/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text .data and .bss are not marked as E820_TYPE_RAM and
	 * attempt to fix it by adding the range. We may have a confused BIOS,
	 * or the user may have used memmap=exactmap or memmap=xxM$yyM to
	 * exclude the kernel range. If we really are running on top of
	 * non-RAM, we will crash later anyways.
	 */
	if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
	e820__range_remove(start, size, E820_TYPE_RAM, 0);
	e820__range_add(start, size, E820_TYPE_RAM);
}
static void __init early_reserve_memory(void)
{
	/*
	 * Reserve the memory occupied by the kernel between _text and
	 * __end_of_kernel_reserve symbols. Any kernel sections after the
	 * __end_of_kernel_reserve symbol must be explicitly reserved with a
	 * separate memblock_reserve() or they will be discarded.
	 */
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);

	/*
	 * The first 4 KB of memory is a BIOS-owned area, but generally it is
	 * not listed as such in the E820 table.
	 *
	 * Reserve the first 64K of memory since some BIOSes are known to
	 * corrupt low memory. After the real mode trampoline is allocated the
	 * rest of the memory below 640k is reserved.
	 *
	 * In addition, make sure page 0 is always reserved because on
	 * systems with L1TF its contents can be leaked to user processes.
	 */
	memblock_reserve(0, SZ_64K);

	early_reserve_initrd();

	memblock_x86_reserve_range_setup_data();

	reserve_bios_regions();
	trim_snb_memory();
}
/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}
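/*
 * Example of the resulting panic output (illustrative values only):
 *
 *	Kernel Offset: 0x1c000000 from 0xffffffff81000000
 *	(relocation range: 0xffffffff80000000-0xffffffffbfffffff)
 */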
void x86_configure_nx(void)
{
	if (boot_cpu_has(X86_FEATURE_NX))
		__supported_pte_mask |= _PAGE_NX;
	else
		__supported_pte_mask &= ~_PAGE_NX;
}

static void __init x86_report_nx(void)
{
	if (!boot_cpu_has(X86_FEATURE_NX)) {
		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
		       "missing in CPU!\n");
	} else {
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#else
		/* 32bit non-PAE kernel, NX cannot be used */
		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
		       "cannot be enabled: non-PAE kernel!\n");
#endif
	}
}

/*
 * Determine if we were loaded by an EFI loader. If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization. Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	/*
	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
	 * a cr3 based tlb flush, so the following __flush_tlb_all()
	 * will not flush anything because the CPU quirk which clears
	 * X86_FEATURE_PGE has not been invoked yet. Though due to the
	 * load_cr3() above the TLB has been flushed already. The
	 * quirk is invoked before subsequent calls to __flush_tlb_all()
	 * so proper operation is guaranteed.
	 */
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
	boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	idt_setup_early_traps();
	early_cpu_init();
	jump_label_init();
	static_call_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
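	/*
	 * Per the boot protocol, a type_of_loader with 0xE in the high
	 * nibble defers to ext_loader_type: e.g. (hypothetical values)
	 * type_of_loader == 0xE1 with ext_loader_type == 0x05 yields
	 * bootloader_type == 0x151 (loader ID 0x15, version nibble 0x1),
	 * to which ext_loader_ver then adds the high version bits.
	 */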
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI32_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI64_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}
#endif

	x86_init.oem.arch_setup();

	/*
	 * Do some memory reservations *before* memory is added to memblock, so
	 * memblock allocations won't overwrite it.
	 *
	 * After this point, everything still needed from the boot loader or
	 * firmware or kernel text should be early reserved or marked not RAM in
	 * e820. All other memory is free game.
	 *
	 * This call needs to happen before e820__memory_setup() which calls the
	 * xen_memory_setup() on Xen dom0 which relies on the fact that those
	 * early reservations have happened already.
	 */
	early_reserve_memory();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	e820__memory_setup();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	setup_initial_init_mm(_text, _etext, _edata, (void *)_brk_end);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	rodata_resource.start = __pa_symbol(__start_rodata);
	rodata_resource.end = __pa_symbol(__end_rodata)-1;
	data_resource.start = __pa_symbol(_sdata);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
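	/*
	 * Net effect of the block above: with CONFIG_CMDLINE_OVERRIDE the
	 * built-in command line replaces the bootloader-provided one
	 * entirely; otherwise the bootloader's command line is appended to
	 * the built-in one before parsing.
	 */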
	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()).
	 */
	x86_configure_nx();

	parse_early_param();

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();

#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages. When memory hotplug is
	 * enabled, we should prevent memblock from allocating memory
	 * for the kernel.
	 *
	 * ACPI SRAT records all hotpluggable memory ranges. But before
	 * SRAT is parsed, we don't know about it.
	 *
	 * The kernel image is loaded into memory at very early time. We
	 * cannot prevent this anyway. So on NUMA system, we set any
	 * node the kernel resides in as un-hotpluggable.
	 *
	 * Since on modern servers, one node could have double-digit
	 * gigabytes memory, we can assume the memory around the kernel
	 * image is also un-hotpluggable. So before SRAT is parsed, just
	 * allocate memory near the kernel image to try the best to keep
	 * the kernel away from hotpluggable memory.
	 */
	if (movable_node_is_enabled())
		memblock_set_bottom_up(true);
#endif

	x86_report_nx();

	apic_setup_apic_calls();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		apic_is_disabled = true;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

	e820__reserve_setup_data();
	e820__finish_early_params();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	reserve_ibft_region();
	x86_init.resources.dmi_setup();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_setup(), for the boot CPU.
	 * For some guest types (Xen PV, SEV-SNP, TDX) it is required to be
	 * called before cache_bp_init() for setting up MTRR state.
	 */
	init_hypervisor_platform();

	tsc_early_init();
	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &rodata_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
				   E820_TYPE_RESERVED);
		e820__update_table(e820_table);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820__print_table("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820__end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	cache_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820__end_of_ram_pfn();

	max_possible_pfn = max_pfn;

	/*
	 * Define random base addresses for memory sections after max_pfn is
	 * defined and before each memory section base is used.
	 */
	kernel_randomize_memory();
#ifdef CONFIG_X86_32
	/* max_low_pfn get updated here */
	find_low_pfn_range();
#else
	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820__end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/* Find and reserve MPTABLE area */
	x86_init.mpparse.find_mptable();

	early_alloc_pgt_buf();

	/*
	 * Need to conclude brk, before e820__memblock_setup()
	 * it could use memblock_find_in_range, could overlap with
	 * brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	e820__memblock_setup();

	/*
	 * Needs to run after memblock setup because it needs the physical
	 * memory size.
	 */
	mem_encrypt_setup_arch();
	cc_random_init();

	efi_fake_memmap();
	efi_find_mirror();
	efi_esrt_init();
	efi_mokvar_table_init();

	/*
	 * The EFI specification says that boot service code won't be
	 * called after ExitBootServices(). This is, in fact, a lie.
	 */
	efi_reserve_boot_services();

	/* preallocate 4k for mptable mpc */
	e820__memblock_alloc_reserved_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
	       (max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	/*
	 * Find free memory for the real mode trampoline and place it there. If
	 * there is not enough free memory under 1M, on EFI-enabled systems
	 * there will be an additional attempt to reclaim the memory for the
	 * real mode trampoline at efi_free_boot_services().
	 *
	 * Unconditionally reserve the entire first 1M of RAM because BIOSes
	 * are known to corrupt low memory and several hundred kilobytes are
	 * not worth complex detection of what memory gets clobbered. Windows
	 * does the same thing for very similar reasons.
	 *
	 * Moreover, on machines with SandyBridge graphics or in setups that
	 * use crashkernel the entire 1M is reserved anyway.
	 *
	 * Note that the TDX host kernel also requires the first 1 MB to be
	 * reserved.
	 */
	x86_platform.realmode_reserve();
	init_mem_mapping();

	idt_setup_early_pf();

	/*
	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
	 * with the current CR4 value. This may not be necessary, but
	 * auditing all the early-boot CR4 manipulation would be needed to
	 * rule it out.
	 *
	 * Mask off features that don't work outside long mode (just
	 * PCIDE for now).
	 */
	mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;

	memblock_set_current_limit(get_max_mapped());

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	if (efi_enabled(EFI_BOOT)) {
		switch (boot_params.secure_boot) {
		case efi_secureboot_mode_disabled:
			pr_info("Secure boot disabled\n");
			break;
		case efi_secureboot_mode_enabled:
			pr_info("Secure boot enabled\n");
			break;
		default:
			pr_info("Secure boot could not be determined\n");
			break;
		}
	}

	reserve_initrd();

	acpi_table_upgrade();
	/* Look for ACPI tables and reserve memory occupied by them. */
	acpi_boot_table_init();

	vsmp_init();

	io_delay_init();

	early_platform_quirks();

	/* Some platforms need the APIC registered for NUMA configuration */
	early_acpi_boot_init();
	x86_init.mpparse.early_parse_smp_cfg();

	x86_flattree_get_config();

	initmem_init();
	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	/*
	 * Reserve memory for crash kernel after SRAT is parsed so that it
	 * won't consume hotpluggable memory.
	 */
	arch_reserve_crashkernel();

	memblock_find_dma_reserve();

	if (!early_xdbc_setup_hardware())
		early_xdbc_register_console();

	x86_init.paging.pagetable_init();

	kasan_init();

	/*
	 * Sync back kernel address range.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();

	tboot_probe();

	map_vsyscall();

	x86_32_probe_apic();

	early_quirks();

	topology_apply_cmdline_limits_early();

	/*
	 * Parse SMP configuration. Try ACPI first and then the platform
	 * specific parser.
	 */
	acpi_boot_init();
	x86_init.mpparse.parse_smp_cfg();

	/* Last opportunity to detect and map the local APIC */
	init_apic_mappings();

	topology_init_possible_cpus();

	init_cpu_to_node();
	init_gi_nodes();

	io_apic_init_mappings();

	x86_init.hyper.guest_late_init();

	e820__reserve_resources();
	e820__register_nosave_regions(max_pfn);

	x86_init.resources.reserve_resources();

	e820__setup_pci_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		vgacon_register_screen(&screen_info);
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	/*
	 * This needs to run before setup_local_APIC() which soft-disables the
	 * local APIC temporarily and that masks the thermal LVT interrupt,
	 * leading to softlockups on machines which have configured SMI
	 * interrupt delivery.
	 */
	therm_lvt_init();

	mcheck_init();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif

	unwind_init();
}
#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);