1 | /* |
2 | * Copyright (C) 1995 Linus Torvalds |
3 | * |
4 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 |
5 | * |
6 | * Memory region support |
7 | * David Parsons <orc@pell.chi.il.us>, July-August 1999 |
8 | * |
9 | * Added E820 sanitization routine (removes overlapping memory regions); |
10 | * Brian Moyle <bmoyle@mvista.com>, February 2001 |
11 | * |
12 | * Moved CPU detection code to cpu/${cpu}.c |
13 | * Patrick Mochel <mochel@osdl.org>, March 2002 |
14 | * |
15 | * Provisions for empty E820 memory regions (reported by certain BIOSes). |
16 | * Alex Achenbach <xela@slit.de>, December 2002. |
17 | * |
18 | */ |
19 | |
20 | /* |
21 | * This file handles the architecture-dependent parts of initialization |
22 | */ |
23 | |
24 | #include <linux/sched.h> |
25 | #include <linux/mm.h> |
26 | #include <linux/mmzone.h> |
27 | #include <linux/screen_info.h> |
28 | #include <linux/ioport.h> |
29 | #include <linux/acpi.h> |
30 | #include <linux/sfi.h> |
31 | #include <linux/apm_bios.h> |
32 | #include <linux/initrd.h> |
33 | #include <linux/memblock.h> |
34 | #include <linux/seq_file.h> |
35 | #include <linux/console.h> |
36 | #include <linux/root_dev.h> |
37 | #include <linux/highmem.h> |
38 | #include <linux/export.h> |
39 | #include <linux/efi.h> |
40 | #include <linux/init.h> |
41 | #include <linux/edd.h> |
42 | #include <linux/iscsi_ibft.h> |
43 | #include <linux/nodemask.h> |
44 | #include <linux/kexec.h> |
45 | #include <linux/dmi.h> |
46 | #include <linux/pfn.h> |
47 | #include <linux/pci.h> |
48 | #include <asm/pci-direct.h> |
49 | #include <linux/init_ohci1394_dma.h> |
50 | #include <linux/kvm_para.h> |
51 | #include <linux/dma-contiguous.h> |
52 | #include <xen/xen.h> |
53 | #include <uapi/linux/mount.h> |
54 | |
55 | #include <linux/errno.h> |
56 | #include <linux/kernel.h> |
57 | #include <linux/stddef.h> |
58 | #include <linux/unistd.h> |
59 | #include <linux/ptrace.h> |
60 | #include <linux/user.h> |
61 | #include <linux/delay.h> |
62 | |
63 | #include <linux/kallsyms.h> |
64 | #include <linux/cpufreq.h> |
65 | #include <linux/dma-mapping.h> |
66 | #include <linux/ctype.h> |
67 | #include <linux/uaccess.h> |
68 | |
69 | #include <linux/percpu.h> |
70 | #include <linux/crash_dump.h> |
71 | #include <linux/tboot.h> |
72 | #include <linux/jiffies.h> |
73 | #include <linux/mem_encrypt.h> |
74 | |
75 | #include <linux/usb/xhci-dbgp.h> |
76 | #include <video/edid.h> |
77 | |
78 | #include <asm/mtrr.h> |
79 | #include <asm/apic.h> |
80 | #include <asm/realmode.h> |
81 | #include <asm/e820/api.h> |
82 | #include <asm/mpspec.h> |
83 | #include <asm/setup.h> |
84 | #include <asm/efi.h> |
85 | #include <asm/timer.h> |
86 | #include <asm/i8259.h> |
87 | #include <asm/sections.h> |
88 | #include <asm/io_apic.h> |
89 | #include <asm/ist.h> |
90 | #include <asm/setup_arch.h> |
91 | #include <asm/bios_ebda.h> |
92 | #include <asm/cacheflush.h> |
93 | #include <asm/processor.h> |
94 | #include <asm/bugs.h> |
95 | #include <asm/kasan.h> |
96 | |
97 | #include <asm/vsyscall.h> |
98 | #include <asm/cpu.h> |
99 | #include <asm/desc.h> |
100 | #include <asm/dma.h> |
101 | #include <asm/iommu.h> |
102 | #include <asm/gart.h> |
103 | #include <asm/mmu_context.h> |
104 | #include <asm/proto.h> |
105 | |
106 | #include <asm/paravirt.h> |
107 | #include <asm/hypervisor.h> |
108 | #include <asm/olpc_ofw.h> |
109 | |
110 | #include <asm/percpu.h> |
111 | #include <asm/topology.h> |
112 | #include <asm/apicdef.h> |
113 | #include <asm/amd_nb.h> |
114 | #include <asm/mce.h> |
115 | #include <asm/alternative.h> |
116 | #include <asm/prom.h> |
117 | #include <asm/microcode.h> |
118 | #include <asm/kaslr.h> |
119 | #include <asm/unwind.h> |
120 | |
121 | /* |
 * max_low_pfn_mapped: highest directly mapped pfn under 4 GiB
 * max_pfn_mapped:     highest directly mapped pfn over 4 GiB
 *
 * The direct mapping only covers E820_TYPE_RAM regions, so the ranges
 * and gaps are represented by pfn_mapped.
127 | */ |
128 | unsigned long max_low_pfn_mapped; |
129 | unsigned long max_pfn_mapped; |
130 | |
131 | #ifdef CONFIG_DMI |
132 | RESERVE_BRK(dmi_alloc, 65536); |
133 | #endif |
134 | |
135 | |
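/*
 * Early brk-style allocator state: the [__brk_base, __brk_limit) range
 * is carved out of the kernel image and handed out by extend_brk()
 * until reserve_brk() locks it down.
 */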
136 | static __initdata unsigned long _brk_start = (unsigned long)__brk_base; |
137 | unsigned long _brk_end = (unsigned long)__brk_base; |
138 | |
139 | struct boot_params boot_params; |
140 | |
141 | /* |
 * Machine setup.
143 | */ |
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
164 | |
165 | |
166 | #ifdef CONFIG_X86_32 |
167 | /* cpu data as detected by the assembly code in head_32.S */ |
168 | struct cpuinfo_x86 new_cpu_data; |
169 | |
170 | /* common cpu data for all cpus */ |
171 | struct cpuinfo_x86 boot_cpu_data __read_mostly; |
172 | EXPORT_SYMBOL(boot_cpu_data); |
173 | |
174 | unsigned int def_to_bigsmp; |
175 | |
176 | /* for MCA, but anyone else can use it if they want */ |
177 | unsigned int machine_id; |
178 | unsigned int machine_submodel_id; |
179 | unsigned int BIOS_revision; |
180 | |
181 | struct apm_info apm_info; |
182 | EXPORT_SYMBOL(apm_info); |
183 | |
184 | #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \ |
185 | defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) |
186 | struct ist_info ist_info; |
187 | EXPORT_SYMBOL(ist_info); |
188 | #else |
189 | struct ist_info ist_info; |
190 | #endif |
191 | |
192 | #else |
193 | struct cpuinfo_x86 boot_cpu_data __read_mostly; |
194 | EXPORT_SYMBOL(boot_cpu_data); |
195 | #endif |
196 | |
197 | |
198 | #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) |
199 | __visible unsigned long mmu_cr4_features __ro_after_init; |
200 | #else |
201 | __visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE; |
202 | #endif |
203 | |
204 | /* Boot loader ID and version as integers, for the benefit of proc_dointvec */ |
205 | int bootloader_type, bootloader_version; |
206 | |
207 | /* |
208 | * Setup options |
209 | */ |
210 | struct screen_info screen_info; |
211 | EXPORT_SYMBOL(screen_info); |
212 | struct edid_info edid_info; |
213 | EXPORT_SYMBOL_GPL(edid_info); |
214 | |
215 | extern int root_mountflags; |
216 | |
217 | unsigned long saved_video_mode; |
218 | |
219 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
220 | #define RAMDISK_PROMPT_FLAG 0x8000 |
221 | #define RAMDISK_LOAD_FLAG 0x4000 |
222 | |
223 | static char __initdata command_line[COMMAND_LINE_SIZE]; |
224 | #ifdef CONFIG_CMDLINE_BOOL |
225 | static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; |
226 | #endif |
227 | |
228 | #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) |
229 | struct edd edd; |
230 | #ifdef CONFIG_EDD_MODULE |
231 | EXPORT_SYMBOL(edd); |
232 | #endif |
233 | /** |
234 | * copy_edd() - Copy the BIOS EDD information |
235 | * from boot_params into a safe place. |
236 | * |
237 | */ |
238 | static inline void __init copy_edd(void) |
239 | { |
240 | memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, |
241 | sizeof(edd.mbr_signature)); |
242 | memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info)); |
243 | edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries; |
244 | edd.edd_info_nr = boot_params.eddbuf_entries; |
245 | } |
246 | #else |
247 | static inline void __init copy_edd(void) |
248 | { |
249 | } |
250 | #endif |
251 | |
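/*
 * Hand out a zeroed, @align-aligned chunk from the early brk area.
 * @align must be a power of two. Only valid during early boot, before
 * reserve_brk() has run (enforced by the BUG_ON below).
 */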
252 | void * __init extend_brk(size_t size, size_t align) |
253 | { |
254 | size_t mask = align - 1; |
255 | void *ret; |
256 | |
257 | BUG_ON(_brk_start == 0); |
258 | BUG_ON(align & mask); |
259 | |
260 | _brk_end = (_brk_end + mask) & ~mask; |
261 | BUG_ON((char *)(_brk_end + size) > __brk_limit); |
262 | |
263 | ret = (void *)_brk_end; |
264 | _brk_end += size; |
265 | |
266 | memset(ret, 0, size); |
267 | |
268 | return ret; |
269 | } |
270 | |
271 | #ifdef CONFIG_X86_32 |
272 | static void __init cleanup_highmap(void) |
273 | { |
274 | } |
275 | #endif |
276 | |
277 | static void __init reserve_brk(void) |
278 | { |
279 | if (_brk_end > _brk_start) |
280 | memblock_reserve(__pa_symbol(_brk_start), |
281 | _brk_end - _brk_start); |
282 | |
283 | /* Mark brk area as locked down and no longer taking any |
284 | new allocations */ |
285 | _brk_start = 0; |
286 | } |
287 | |
288 | u64 relocated_ramdisk; |
289 | |
290 | #ifdef CONFIG_BLK_DEV_INITRD |
291 | |
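/*
 * The ramdisk address and size are split between the legacy 32-bit
 * setup header fields and the ext_* fields carrying the upper 32 bits.
 */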
292 | static u64 __init get_ramdisk_image(void) |
293 | { |
294 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
295 | |
296 | ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32; |
297 | |
298 | return ramdisk_image; |
299 | } |
300 | static u64 __init get_ramdisk_size(void) |
301 | { |
302 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
303 | |
304 | ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32; |
305 | |
306 | return ramdisk_size; |
307 | } |
308 | |
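/*
 * Copy the initrd into freshly reserved, directly mapped memory below
 * max_pfn_mapped; used when the bootloader put it in a range the
 * kernel has not direct-mapped.
 */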
309 | static void __init relocate_initrd(void) |
310 | { |
311 | /* Assume only end is not page aligned */ |
312 | u64 ramdisk_image = get_ramdisk_image(); |
313 | u64 ramdisk_size = get_ramdisk_size(); |
314 | u64 area_size = PAGE_ALIGN(ramdisk_size); |
315 | |
316 | /* We need to move the initrd down into directly mapped mem */ |
317 | relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), |
318 | area_size, PAGE_SIZE); |
319 | |
320 | if (!relocated_ramdisk) |
321 | panic("Cannot find place for new RAMDISK of size %lld\n" , |
322 | ramdisk_size); |
323 | |
324 | /* Note: this includes all the mem currently occupied by |
325 | the initrd, we rely on that fact to keep the data intact. */ |
326 | memblock_reserve(relocated_ramdisk, area_size); |
327 | initrd_start = relocated_ramdisk + PAGE_OFFSET; |
328 | initrd_end = initrd_start + ramdisk_size; |
329 | printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n" , |
330 | relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); |
331 | |
332 | copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size); |
333 | |
334 | printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" |
335 | " [mem %#010llx-%#010llx]\n" , |
336 | ramdisk_image, ramdisk_image + ramdisk_size - 1, |
337 | relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); |
338 | } |
339 | |
340 | static void __init early_reserve_initrd(void) |
341 | { |
342 | /* Assume only end is not page aligned */ |
343 | u64 ramdisk_image = get_ramdisk_image(); |
344 | u64 ramdisk_size = get_ramdisk_size(); |
345 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
346 | |
347 | if (!boot_params.hdr.type_of_loader || |
348 | !ramdisk_image || !ramdisk_size) |
349 | return; /* No initrd provided by bootloader */ |
350 | |
351 | memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image); |
352 | } |
353 | static void __init reserve_initrd(void) |
354 | { |
355 | /* Assume only end is not page aligned */ |
356 | u64 ramdisk_image = get_ramdisk_image(); |
357 | u64 ramdisk_size = get_ramdisk_size(); |
358 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
359 | u64 mapped_size; |
360 | |
361 | if (!boot_params.hdr.type_of_loader || |
362 | !ramdisk_image || !ramdisk_size) |
363 | return; /* No initrd provided by bootloader */ |
364 | |
365 | initrd_start = 0; |
366 | |
367 | mapped_size = memblock_mem_size(max_pfn_mapped); |
368 | if (ramdisk_size >= (mapped_size>>1)) |
369 | panic("initrd too large to handle, " |
370 | "disabling initrd (%lld needed, %lld available)\n" , |
371 | ramdisk_size, mapped_size>>1); |
372 | |
373 | printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n" , ramdisk_image, |
374 | ramdisk_end - 1); |
375 | |
376 | if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image), |
377 | PFN_DOWN(ramdisk_end))) { |
378 | /* All are mapped, easy case */ |
379 | initrd_start = ramdisk_image + PAGE_OFFSET; |
380 | initrd_end = initrd_start + ramdisk_size; |
381 | return; |
382 | } |
383 | |
384 | relocate_initrd(); |
385 | |
386 | memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); |
387 | } |
388 | |
389 | #else |
390 | static void __init early_reserve_initrd(void) |
391 | { |
392 | } |
393 | static void __init reserve_initrd(void) |
394 | { |
395 | } |
396 | #endif /* CONFIG_BLK_DEV_INITRD */ |
397 | |
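/*
 * Walk the singly linked list of setup_data blobs passed in by the
 * bootloader. The nodes are linked by physical address, so each header
 * is mapped with early_memremap() before it is dereferenced.
 */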
398 | static void __init parse_setup_data(void) |
399 | { |
400 | struct setup_data *data; |
401 | u64 pa_data, pa_next; |
402 | |
403 | pa_data = boot_params.hdr.setup_data; |
404 | while (pa_data) { |
405 | u32 data_len, data_type; |
406 | |
407 | data = early_memremap(pa_data, sizeof(*data)); |
408 | data_len = data->len + sizeof(struct setup_data); |
409 | data_type = data->type; |
410 | pa_next = data->next; |
411 | early_memunmap(data, sizeof(*data)); |
412 | |
413 | switch (data_type) { |
414 | case SETUP_E820_EXT: |
415 | e820__memory_setup_extended(pa_data, data_len); |
416 | break; |
417 | case SETUP_DTB: |
418 | add_dtb(pa_data); |
419 | break; |
420 | case SETUP_EFI: |
421 | parse_efi_setup(pa_data, data_len); |
422 | break; |
423 | default: |
424 | break; |
425 | } |
426 | pa_data = pa_next; |
427 | } |
428 | } |
429 | |
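/*
 * The setup_data blobs live in memory that memblock otherwise considers
 * free; reserve each node so early allocations cannot clobber them.
 */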
430 | static void __init memblock_x86_reserve_range_setup_data(void) |
431 | { |
432 | struct setup_data *data; |
433 | u64 pa_data; |
434 | |
435 | pa_data = boot_params.hdr.setup_data; |
436 | while (pa_data) { |
437 | data = early_memremap(pa_data, sizeof(*data)); |
438 | memblock_reserve(pa_data, sizeof(*data) + data->len); |
439 | pa_data = data->next; |
440 | early_memunmap(data, sizeof(*data)); |
441 | } |
442 | } |
443 | |
444 | /* |
445 | * --------- Crashkernel reservation ------------------------------ |
446 | */ |
447 | |
448 | #ifdef CONFIG_KEXEC_CORE |
449 | |
450 | /* 16M alignment for crash kernel regions */ |
451 | #define CRASH_ALIGN (16 << 20) |
452 | |
453 | /* |
 * Keep the crash kernel below this limit. On 32-bit, earlier kernels
 * would limit the crash kernel to the low 512 MiB due to mapping
 * restrictions. On 64-bit, old kexec-tools need the crash kernel to be
 * below 896 MiB.
457 | */ |
458 | #ifdef CONFIG_X86_32 |
459 | # define CRASH_ADDR_LOW_MAX (512 << 20) |
460 | # define CRASH_ADDR_HIGH_MAX (512 << 20) |
461 | #else |
462 | # define CRASH_ADDR_LOW_MAX (896UL << 20) |
463 | # define CRASH_ADDR_HIGH_MAX MAXMEM |
464 | #endif |
465 | |
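/*
 * On 64-bit, if the main crashkernel range sits above 4 GiB, reserve an
 * additional low range so the kdump kernel still has DMA-capable memory
 * below 4 GiB (swiotlb, 32-bit devices).
 */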
466 | static int __init reserve_crashkernel_low(void) |
467 | { |
468 | #ifdef CONFIG_X86_64 |
469 | unsigned long long base, low_base = 0, low_size = 0; |
470 | unsigned long total_low_mem; |
471 | int ret; |
472 | |
473 | total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT)); |
474 | |
475 | /* crashkernel=Y,low */ |
476 | ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); |
477 | if (ret) { |
478 | /* |
479 | * two parts from lib/swiotlb.c: |
480 | * -swiotlb size: user-specified with swiotlb= or default. |
481 | * |
482 | * -swiotlb overflow buffer: now hardcoded to 32k. We round it |
483 | * to 8M for other buffers that may need to stay low too. Also |
484 | * make sure we allocate enough extra low memory so that we |
485 | * don't run out of DMA buffers for 32-bit devices. |
486 | */ |
487 | low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20); |
488 | } else { |
489 | /* passed with crashkernel=0,low ? */ |
490 | if (!low_size) |
491 | return 0; |
492 | } |
493 | |
494 | low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN); |
495 | if (!low_base) { |
496 | pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n" , |
497 | (unsigned long)(low_size >> 20)); |
498 | return -ENOMEM; |
499 | } |
500 | |
501 | ret = memblock_reserve(low_base, low_size); |
502 | if (ret) { |
503 | pr_err("%s: Error reserving crashkernel low memblock.\n" , __func__); |
504 | return ret; |
505 | } |
506 | |
507 | pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n" , |
508 | (unsigned long)(low_size >> 20), |
509 | (unsigned long)(low_base >> 20), |
510 | (unsigned long)(total_low_mem >> 20)); |
511 | |
512 | crashk_low_res.start = low_base; |
513 | crashk_low_res.end = low_base + low_size - 1; |
514 | insert_resource(&iomem_resource, &crashk_low_res); |
515 | #endif |
516 | return 0; |
517 | } |
518 | |
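/*
 * Reserve memory for the kdump kernel as requested on the command line,
 * e.g.:
 *
 *	crashkernel=512M	auto-placed below CRASH_ADDR_LOW_MAX
 *	crashkernel=512M@256M	fixed physical base address
 *	crashkernel=512M,high	may be placed above 4 GiB
 */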
519 | static void __init reserve_crashkernel(void) |
520 | { |
521 | unsigned long long crash_size, crash_base, total_mem; |
522 | bool high = false; |
523 | int ret; |
524 | |
525 | total_mem = memblock_phys_mem_size(); |
526 | |
527 | /* crashkernel=XM */ |
528 | ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); |
529 | if (ret != 0 || crash_size <= 0) { |
530 | /* crashkernel=X,high */ |
531 | ret = parse_crashkernel_high(boot_command_line, total_mem, |
532 | &crash_size, &crash_base); |
533 | if (ret != 0 || crash_size <= 0) |
534 | return; |
535 | high = true; |
536 | } |
537 | |
538 | if (xen_pv_domain()) { |
539 | pr_info("Ignoring crashkernel for a Xen PV domain\n" ); |
540 | return; |
541 | } |
542 | |
543 | /* 0 means: find the address automatically */ |
544 | if (crash_base <= 0) { |
545 | /* |
546 | * Set CRASH_ADDR_LOW_MAX upper bound for crash memory, |
547 | * as old kexec-tools loads bzImage below that, unless |
548 | * "crashkernel=size[KMG],high" is specified. |
549 | */ |
550 | crash_base = memblock_find_in_range(CRASH_ALIGN, |
551 | high ? CRASH_ADDR_HIGH_MAX |
552 | : CRASH_ADDR_LOW_MAX, |
553 | crash_size, CRASH_ALIGN); |
554 | if (!crash_base) { |
555 | pr_info("crashkernel reservation failed - No suitable area found.\n" ); |
556 | return; |
557 | } |
558 | |
559 | } else { |
560 | unsigned long long start; |
561 | |
562 | start = memblock_find_in_range(crash_base, |
563 | crash_base + crash_size, |
564 | crash_size, 1 << 20); |
565 | if (start != crash_base) { |
566 | pr_info("crashkernel reservation failed - memory is in use.\n" ); |
567 | return; |
568 | } |
569 | } |
570 | ret = memblock_reserve(crash_base, crash_size); |
571 | if (ret) { |
572 | pr_err("%s: Error reserving crashkernel memblock.\n" , __func__); |
573 | return; |
574 | } |
575 | |
576 | if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) { |
577 | memblock_free(crash_base, crash_size); |
578 | return; |
579 | } |
580 | |
581 | pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n" , |
582 | (unsigned long)(crash_size >> 20), |
583 | (unsigned long)(crash_base >> 20), |
584 | (unsigned long)(total_mem >> 20)); |
585 | |
586 | crashk_res.start = crash_base; |
587 | crashk_res.end = crash_base + crash_size - 1; |
588 | insert_resource(&iomem_resource, &crashk_res); |
589 | } |
590 | #else |
591 | static void __init reserve_crashkernel(void) |
592 | { |
593 | } |
594 | #endif |
595 | |
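/* Fixed legacy PC I/O port ranges, claimed up front as busy */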
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
618 | |
619 | void __init reserve_standard_io_resources(void) |
620 | { |
621 | int i; |
622 | |
623 | /* request I/O space for devices used on all i[345]86 PCs */ |
624 | for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++) |
625 | request_resource(&ioport_resource, &standard_io_resources[i]); |
626 | |
627 | } |
628 | |
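/*
 * The iSCSI Boot Firmware Table (iBFT) may sit in ordinary RAM; reserve
 * it so the data survives until the iBFT driver can parse it.
 */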
629 | static __init void reserve_ibft_region(void) |
630 | { |
631 | unsigned long addr, size = 0; |
632 | |
633 | addr = find_ibft_region(&size); |
634 | |
635 | if (size) |
636 | memblock_reserve(addr, size); |
637 | } |
638 | |
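/*
 * Check whether the boot display is one of the affected Sandy Bridge
 * integrated graphics devices, probing PCI device 00:02.0 via early
 * config space access.
 */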
639 | static bool __init snb_gfx_workaround_needed(void) |
640 | { |
641 | #ifdef CONFIG_PCI |
642 | int i; |
643 | u16 vendor, devid; |
644 | static const __initconst u16 snb_ids[] = { |
645 | 0x0102, |
646 | 0x0112, |
647 | 0x0122, |
648 | 0x0106, |
649 | 0x0116, |
650 | 0x0126, |
651 | 0x010a, |
652 | }; |
653 | |
654 | /* Assume no if something weird is going on with PCI */ |
655 | if (!early_pci_allowed()) |
656 | return false; |
657 | |
658 | vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID); |
659 | if (vendor != 0x8086) |
660 | return false; |
661 | |
662 | devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID); |
663 | for (i = 0; i < ARRAY_SIZE(snb_ids); i++) |
664 | if (devid == snb_ids[i]) |
665 | return true; |
666 | #endif |
667 | |
668 | return false; |
669 | } |
670 | |
671 | /* |
672 | * Sandy Bridge graphics has trouble with certain ranges, exclude |
673 | * them from allocation. |
674 | */ |
675 | static void __init trim_snb_memory(void) |
676 | { |
677 | static const __initconst unsigned long bad_pages[] = { |
678 | 0x20050000, |
679 | 0x20110000, |
680 | 0x20130000, |
681 | 0x20138000, |
682 | 0x40004000, |
683 | }; |
684 | int i; |
685 | |
686 | if (!snb_gfx_workaround_needed()) |
687 | return; |
688 | |
689 | printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n" ); |
690 | |
691 | /* |
692 | * Reserve all memory below the 1 MB mark that has not |
693 | * already been reserved. |
694 | */ |
695 | memblock_reserve(0, 1<<20); |
696 | |
697 | for (i = 0; i < ARRAY_SIZE(bad_pages); i++) { |
698 | if (memblock_reserve(bad_pages[i], PAGE_SIZE)) |
699 | printk(KERN_WARNING "failed to reserve 0x%08lx\n" , |
700 | bad_pages[i]); |
701 | } |
702 | } |
703 | |
704 | /* |
705 | * Here we put platform-specific memory range workarounds, i.e. |
 * memory known to be corrupt or otherwise in need of reservation on
707 | * specific platforms. |
708 | * |
709 | * If this gets used more widely it could use a real dispatch mechanism. |
710 | */ |
711 | static void __init trim_platform_memory_ranges(void) |
712 | { |
713 | trim_snb_memory(); |
714 | } |
715 | |
716 | static void __init trim_bios_range(void) |
717 | { |
718 | /* |
	 * A special case is the first 4 KiB of memory;
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 *
	 * This typically reserves additional memory (64 KiB by default)
	 * since some BIOSes are known to corrupt low memory. See the
	 * Kconfig help text for X86_RESERVE_LOW.
726 | */ |
727 | e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED); |
728 | |
729 | /* |
	 * Special case: some BIOSen report the PC BIOS
	 * area (640 KiB -> 1 MiB) as RAM even though it is not.
	 * Take them out.
733 | */ |
734 | e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1); |
735 | |
736 | e820__update_table(e820_table); |
737 | } |
738 | |
/* called before trim_bios_range() to spare an extra sanitizing pass */
740 | static void __init e820_add_kernel_range(void) |
741 | { |
742 | u64 start = __pa_symbol(_text); |
743 | u64 size = __pa_symbol(_end) - start; |
744 | |
745 | /* |
	 * Complain if .text, .data and .bss are not marked as E820_TYPE_RAM
	 * and attempt to fix it by adding the range. We may have a confused
	 * BIOS, or the user may have used memmap=exactmap or memmap=xxM$yyM
	 * to exclude the kernel range. If we really are running on top of
	 * non-RAM, we will crash later anyway.
751 | */ |
752 | if (e820__mapped_all(start, start + size, E820_TYPE_RAM)) |
753 | return; |
754 | |
755 | pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n" ); |
756 | e820__range_remove(start, size, E820_TYPE_RAM, 0); |
757 | e820__range_add(start, size, E820_TYPE_RAM); |
758 | } |
759 | |
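/* Bytes of low memory to reserve; the Kconfig value is given in KiB */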
760 | static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; |
761 | |
762 | static int __init parse_reservelow(char *p) |
763 | { |
764 | unsigned long long size; |
765 | |
766 | if (!p) |
767 | return -EINVAL; |
768 | |
769 | size = memparse(p, &p); |
770 | |
771 | if (size < 4096) |
772 | size = 4096; |
773 | |
774 | if (size > 640*1024) |
775 | size = 640*1024; |
776 | |
777 | reserve_low = size; |
778 | |
779 | return 0; |
780 | } |
781 | |
782 | early_param("reservelow" , parse_reservelow); |
783 | |
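/*
 * Reserve the first reserve_low bytes of RAM to work around BIOSes that
 * are known to corrupt low memory; see X86_RESERVE_LOW.
 */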
784 | static void __init trim_low_memory_range(void) |
785 | { |
786 | memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE)); |
787 | } |
788 | |
789 | /* |
790 | * Dump out kernel offset information on panic. |
791 | */ |
792 | static int |
793 | dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) |
794 | { |
795 | if (kaslr_enabled()) { |
796 | pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n" , |
797 | kaslr_offset(), |
798 | __START_KERNEL, |
799 | __START_KERNEL_map, |
800 | MODULES_VADDR-1); |
801 | } else { |
802 | pr_emerg("Kernel Offset: disabled\n" ); |
803 | } |
804 | |
805 | return 0; |
806 | } |
807 | |
808 | /* |
809 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
810 | * passed the efi memmap, systab, etc., so we should use these data structures |
811 | * for initialization. Note, the efi init code path is determined by the |
812 | * global efi_enabled. This allows the same kernel image to be used on existing |
813 | * systems (with a traditional BIOS) as well as on EFI systems. |
814 | */ |
815 | /* |
816 | * setup_arch - architecture-specific boot-time initializations |
817 | * |
818 | * Note: On x86_64, fixmaps are ready for use even before this is called. |
819 | */ |
820 | |
821 | void __init setup_arch(char **cmdline_p) |
822 | { |
823 | memblock_reserve(__pa_symbol(_text), |
824 | (unsigned long)__bss_stop - (unsigned long)_text); |
825 | |
826 | /* |
827 | * Make sure page 0 is always reserved because on systems with |
828 | * L1TF its contents can be leaked to user processes. |
829 | */ |
830 | memblock_reserve(0, PAGE_SIZE); |
831 | |
832 | early_reserve_initrd(); |
833 | |
834 | /* |
835 | * At this point everything still needed from the boot loader |
836 | * or BIOS or kernel text should be early reserved or marked not |
837 | * RAM in e820. All other memory is free game. |
838 | */ |
839 | |
840 | #ifdef CONFIG_X86_32 |
841 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); |
842 | |
843 | /* |
844 | * copy kernel address range established so far and switch |
845 | * to the proper swapper page table |
846 | */ |
847 | clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY, |
848 | initial_page_table + KERNEL_PGD_BOUNDARY, |
849 | KERNEL_PGD_PTRS); |
850 | |
851 | load_cr3(swapper_pg_dir); |
852 | /* |
853 | * Note: Quark X1000 CPUs advertise PGE incorrectly and require |
854 | * a cr3 based tlb flush, so the following __flush_tlb_all() |
855 | * will not flush anything because the cpu quirk which clears |
856 | * X86_FEATURE_PGE has not been invoked yet. Though due to the |
857 | * load_cr3() above the TLB has been flushed already. The |
858 | * quirk is invoked before subsequent calls to __flush_tlb_all() |
859 | * so proper operation is guaranteed. |
860 | */ |
861 | __flush_tlb_all(); |
862 | #else |
863 | printk(KERN_INFO "Command line: %s\n" , boot_command_line); |
864 | boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS; |
865 | #endif |
866 | |
867 | /* |
868 | * If we have OLPC OFW, we might end up relocating the fixmap due to |
869 | * reserve_top(), so do this before touching the ioremap area. |
870 | */ |
871 | olpc_ofw_detect(); |
872 | |
873 | idt_setup_early_traps(); |
874 | early_cpu_init(); |
875 | arch_init_ideal_nops(); |
876 | jump_label_init(); |
877 | early_ioremap_init(); |
878 | |
879 | setup_olpc_ofw_pgd(); |
880 | |
881 | ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); |
882 | screen_info = boot_params.screen_info; |
883 | edid_info = boot_params.edid_info; |
884 | #ifdef CONFIG_X86_32 |
885 | apm_info.bios = boot_params.apm_bios_info; |
886 | ist_info = boot_params.ist_info; |
887 | #endif |
888 | saved_video_mode = boot_params.hdr.vid_mode; |
889 | bootloader_type = boot_params.hdr.type_of_loader; |
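	/*
	 * A loader type of 0xE means "extended": the real ID lives in
	 * ext_loader_type, offset by 0x10 per the x86 boot protocol.
	 */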
890 | if ((bootloader_type >> 4) == 0xe) { |
891 | bootloader_type &= 0xf; |
892 | bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4; |
893 | } |
894 | bootloader_version = bootloader_type & 0xf; |
895 | bootloader_version |= boot_params.hdr.ext_loader_ver << 4; |
896 | |
897 | #ifdef CONFIG_BLK_DEV_RAM |
898 | rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; |
899 | rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0); |
900 | rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0); |
901 | #endif |
902 | #ifdef CONFIG_EFI |
903 | if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, |
904 | EFI32_LOADER_SIGNATURE, 4)) { |
905 | set_bit(EFI_BOOT, &efi.flags); |
906 | } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, |
907 | EFI64_LOADER_SIGNATURE, 4)) { |
908 | set_bit(EFI_BOOT, &efi.flags); |
909 | set_bit(EFI_64BIT, &efi.flags); |
910 | } |
911 | #endif |
912 | |
913 | x86_init.oem.arch_setup(); |
914 | |
915 | iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; |
916 | e820__memory_setup(); |
917 | parse_setup_data(); |
918 | |
919 | copy_edd(); |
920 | |
921 | if (!boot_params.hdr.root_flags) |
922 | root_mountflags &= ~MS_RDONLY; |
923 | init_mm.start_code = (unsigned long) _text; |
924 | init_mm.end_code = (unsigned long) _etext; |
925 | init_mm.end_data = (unsigned long) _edata; |
926 | init_mm.brk = _brk_end; |
927 | |
928 | mpx_mm_init(&init_mm); |
929 | |
930 | code_resource.start = __pa_symbol(_text); |
931 | code_resource.end = __pa_symbol(_etext)-1; |
932 | data_resource.start = __pa_symbol(_etext); |
933 | data_resource.end = __pa_symbol(_edata)-1; |
934 | bss_resource.start = __pa_symbol(__bss_start); |
935 | bss_resource.end = __pa_symbol(__bss_stop)-1; |
936 | |
937 | #ifdef CONFIG_CMDLINE_BOOL |
938 | #ifdef CONFIG_CMDLINE_OVERRIDE |
939 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
940 | #else |
941 | if (builtin_cmdline[0]) { |
942 | /* append boot loader cmdline to builtin */ |
943 | strlcat(builtin_cmdline, " " , COMMAND_LINE_SIZE); |
944 | strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE); |
945 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
946 | } |
947 | #endif |
948 | #endif |
949 | |
950 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); |
951 | *cmdline_p = command_line; |
952 | |
953 | /* |
954 | * x86_configure_nx() is called before parse_early_param() to detect |
955 | * whether hardware doesn't support NX (so that the early EHCI debug |
956 | * console setup can safely call set_fixmap()). It may then be called |
957 | * again from within noexec_setup() during parsing early parameters |
958 | * to honor the respective command line option. |
959 | */ |
960 | x86_configure_nx(); |
961 | |
962 | parse_early_param(); |
963 | |
964 | if (efi_enabled(EFI_BOOT)) |
965 | efi_memblock_x86_reserve_range(); |
966 | #ifdef CONFIG_MEMORY_HOTPLUG |
967 | /* |
968 | * Memory used by the kernel cannot be hot-removed because Linux |
969 | * cannot migrate the kernel pages. When memory hotplug is |
970 | * enabled, we should prevent memblock from allocating memory |
971 | * for the kernel. |
972 | * |
973 | * ACPI SRAT records all hotpluggable memory ranges. But before |
974 | * SRAT is parsed, we don't know about it. |
975 | * |
	 * The kernel image is loaded into memory very early, and we
	 * cannot prevent that. So on NUMA systems, we mark any node the
	 * kernel resides in as un-hotpluggable.
	 *
	 * Since on modern servers one node can have double-digit
	 * gigabytes of memory, we can assume the memory around the
	 * kernel image is also un-hotpluggable. So before SRAT is
	 * parsed, just allocate memory near the kernel image to do our
	 * best to keep the kernel away from hotpluggable memory.
985 | */ |
986 | if (movable_node_is_enabled()) |
987 | memblock_set_bottom_up(true); |
988 | #endif |
989 | |
990 | x86_report_nx(); |
991 | |
	/* after early param, so a panic here could reach the serial console */
993 | memblock_x86_reserve_range_setup_data(); |
994 | |
995 | if (acpi_mps_check()) { |
996 | #ifdef CONFIG_X86_LOCAL_APIC |
997 | disable_apic = 1; |
998 | #endif |
999 | setup_clear_cpu_cap(X86_FEATURE_APIC); |
1000 | } |
1001 | |
1002 | e820__reserve_setup_data(); |
1003 | e820__finish_early_params(); |
1004 | |
1005 | if (efi_enabled(EFI_BOOT)) |
1006 | efi_init(); |
1007 | |
1008 | dmi_scan_machine(); |
1009 | dmi_memdev_walk(); |
1010 | dmi_set_dump_stack_arch_desc(); |
1011 | |
1012 | /* |
1013 | * VMware detection requires dmi to be available, so this |
1014 | * needs to be done after dmi_scan_machine(), for the boot CPU. |
1015 | */ |
1016 | init_hypervisor_platform(); |
1017 | |
1018 | tsc_early_init(); |
1019 | x86_init.resources.probe_roms(); |
1020 | |
1021 | /* after parse_early_param, so could debug it */ |
1022 | insert_resource(&iomem_resource, &code_resource); |
1023 | insert_resource(&iomem_resource, &data_resource); |
1024 | insert_resource(&iomem_resource, &bss_resource); |
1025 | |
1026 | e820_add_kernel_range(); |
1027 | trim_bios_range(); |
1028 | #ifdef CONFIG_X86_32 |
1029 | if (ppro_with_ram_bug()) { |
1030 | e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM, |
1031 | E820_TYPE_RESERVED); |
1032 | e820__update_table(e820_table); |
1033 | printk(KERN_INFO "fixed physical RAM map:\n" ); |
1034 | e820__print_table("bad_ppro" ); |
1035 | } |
1036 | #else |
1037 | early_gart_iommu_check(); |
1038 | #endif |
1039 | |
1040 | /* |
1041 | * partially used pages are not usable - thus |
1042 | * we are rounding upwards: |
1043 | */ |
1044 | max_pfn = e820__end_of_ram_pfn(); |
1045 | |
1046 | /* update e820 for memory not covered by WB MTRRs */ |
1047 | mtrr_bp_init(); |
1048 | if (mtrr_trim_uncached_memory(max_pfn)) |
1049 | max_pfn = e820__end_of_ram_pfn(); |
1050 | |
1051 | max_possible_pfn = max_pfn; |
1052 | |
1053 | /* |
1054 | * This call is required when the CPU does not support PAT. If |
1055 | * mtrr_bp_init() invoked it already via pat_init() the call has no |
1056 | * effect. |
1057 | */ |
1058 | init_cache_modes(); |
1059 | |
1060 | /* |
1061 | * Define random base addresses for memory sections after max_pfn is |
1062 | * defined and before each memory section base is used. |
1063 | */ |
1064 | kernel_randomize_memory(); |
1065 | |
1066 | #ifdef CONFIG_X86_32 |
	/* max_low_pfn gets updated here */
1068 | find_low_pfn_range(); |
1069 | #else |
1070 | check_x2apic(); |
1071 | |
1072 | /* How many end-of-memory variables you have, grandma! */ |
1073 | /* need this before calling reserve_initrd */ |
1074 | if (max_pfn > (1UL<<(32 - PAGE_SHIFT))) |
1075 | max_low_pfn = e820__end_of_low_ram_pfn(); |
1076 | else |
1077 | max_low_pfn = max_pfn; |
1078 | |
1079 | high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; |
1080 | #endif |
1081 | |
1082 | /* |
1083 | * Find and reserve possible boot-time SMP configuration: |
1084 | */ |
1085 | find_smp_config(); |
1086 | |
1087 | reserve_ibft_region(); |
1088 | |
1089 | early_alloc_pgt_buf(); |
1090 | |
1091 | /* |
	 * Conclude the brk area before e820__memblock_setup(): it could
	 * use memblock_find_in_range(), which might otherwise overlap
	 * with the brk area.
1095 | */ |
1096 | reserve_brk(); |
1097 | |
1098 | cleanup_highmap(); |
1099 | |
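	/*
	 * The direct mapping of RAM is not set up yet, so keep early
	 * memblock allocations below ISA_END_ADDRESS (1 MiB), which is
	 * known to be accessible this early.
	 */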
1100 | memblock_set_current_limit(ISA_END_ADDRESS); |
1101 | e820__memblock_setup(); |
1102 | |
1103 | reserve_bios_regions(); |
1104 | |
1105 | if (efi_enabled(EFI_MEMMAP)) { |
1106 | efi_fake_memmap(); |
1107 | efi_find_mirror(); |
1108 | efi_esrt_init(); |
1109 | |
1110 | /* |
1111 | * The EFI specification says that boot service code won't be |
1112 | * called after ExitBootServices(). This is, in fact, a lie. |
1113 | */ |
1114 | efi_reserve_boot_services(); |
1115 | } |
1116 | |
1117 | /* preallocate 4k for mptable mpc */ |
1118 | e820__memblock_alloc_reserved_mpc_new(); |
1119 | |
1120 | #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION |
1121 | setup_bios_corruption_check(); |
1122 | #endif |
1123 | |
1124 | #ifdef CONFIG_X86_32 |
1125 | printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n" , |
1126 | (max_pfn_mapped<<PAGE_SHIFT) - 1); |
1127 | #endif |
1128 | |
1129 | reserve_real_mode(); |
1130 | |
1131 | trim_platform_memory_ranges(); |
1132 | trim_low_memory_range(); |
1133 | |
1134 | init_mem_mapping(); |
1135 | |
1136 | idt_setup_early_pf(); |
1137 | |
1138 | /* |
1139 | * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features) |
1140 | * with the current CR4 value. This may not be necessary, but |
1141 | * auditing all the early-boot CR4 manipulation would be needed to |
1142 | * rule it out. |
1143 | * |
1144 | * Mask off features that don't work outside long mode (just |
1145 | * PCIDE for now). |
1146 | */ |
1147 | mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE; |
1148 | |
1149 | memblock_set_current_limit(get_max_mapped()); |
1150 | |
1151 | /* |
1152 | * NOTE: On x86-32, only from this point on, fixmaps are ready for use. |
1153 | */ |
1154 | |
1155 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT |
1156 | if (init_ohci1394_dma_early) |
1157 | init_ohci1394_dma_on_all_controllers(); |
1158 | #endif |
1159 | /* Allocate bigger log buffer */ |
1160 | setup_log_buf(1); |
1161 | |
1162 | if (efi_enabled(EFI_BOOT)) { |
1163 | switch (boot_params.secure_boot) { |
		case efi_secureboot_mode_disabled:
			pr_info("Secure boot disabled\n");
			break;
		case efi_secureboot_mode_enabled:
			pr_info("Secure boot enabled\n");
			break;
		default:
			pr_info("Secure boot could not be determined\n");
			break;
1173 | } |
1174 | } |
1175 | |
1176 | reserve_initrd(); |
1177 | |
1178 | acpi_table_upgrade(); |
1179 | |
1180 | vsmp_init(); |
1181 | |
1182 | io_delay_init(); |
1183 | |
1184 | early_platform_quirks(); |
1185 | |
1186 | /* |
1187 | * Parse the ACPI tables for possible boot-time SMP configuration. |
1188 | */ |
1189 | acpi_boot_table_init(); |
1190 | |
1191 | early_acpi_boot_init(); |
1192 | |
1193 | initmem_init(); |
1194 | dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); |
1195 | |
1196 | /* |
1197 | * Reserve memory for crash kernel after SRAT is parsed so that it |
1198 | * won't consume hotpluggable memory. |
1199 | */ |
1200 | reserve_crashkernel(); |
1201 | |
1202 | memblock_find_dma_reserve(); |
1203 | |
1204 | if (!early_xdbc_setup_hardware()) |
1205 | early_xdbc_register_console(); |
1206 | |
1207 | x86_init.paging.pagetable_init(); |
1208 | |
1209 | kasan_init(); |
1210 | |
1211 | /* |
1212 | * Sync back kernel address range. |
1213 | * |
1214 | * FIXME: Can the later sync in setup_cpu_entry_areas() replace |
1215 | * this call? |
1216 | */ |
1217 | sync_initial_page_table(); |
1218 | |
1219 | tboot_probe(); |
1220 | |
1221 | map_vsyscall(); |
1222 | |
1223 | generic_apic_probe(); |
1224 | |
1225 | early_quirks(); |
1226 | |
1227 | /* |
1228 | * Read APIC and some other early information from ACPI tables. |
1229 | */ |
1230 | acpi_boot_init(); |
1231 | sfi_init(); |
1232 | x86_dtb_init(); |
1233 | |
1234 | /* |
1235 | * get boot-time SMP configuration: |
1236 | */ |
1237 | get_smp_config(); |
1238 | |
1239 | /* |
	 * Systems w/o ACPI and mptables might not have the local APIC
	 * mapped yet, but prefill_possible_map() might need to access it.
1242 | */ |
1243 | init_apic_mappings(); |
1244 | |
1245 | prefill_possible_map(); |
1246 | |
1247 | init_cpu_to_node(); |
1248 | |
1249 | io_apic_init_mappings(); |
1250 | |
1251 | x86_init.hyper.guest_late_init(); |
1252 | |
1253 | e820__reserve_resources(); |
1254 | e820__register_nosave_regions(max_pfn); |
1255 | |
1256 | x86_init.resources.reserve_resources(); |
1257 | |
1258 | e820__setup_pci_gap(); |
1259 | |
1260 | #ifdef CONFIG_VT |
1261 | #if defined(CONFIG_VGA_CONSOLE) |
1262 | if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) |
1263 | conswitchp = &vga_con; |
1264 | #elif defined(CONFIG_DUMMY_CONSOLE) |
1265 | conswitchp = &dummy_con; |
1266 | #endif |
1267 | #endif |
1268 | x86_init.oem.banner(); |
1269 | |
1270 | x86_init.timers.wallclock_init(); |
1271 | |
1272 | mcheck_init(); |
1273 | |
1274 | register_refined_jiffies(CLOCK_TICK_RATE); |
1275 | |
1276 | #ifdef CONFIG_EFI |
1277 | if (efi_enabled(EFI_BOOT)) |
1278 | efi_apply_memmap_quirks(); |
1279 | #endif |
1280 | |
1281 | unwind_init(); |
1282 | } |
1283 | |
1284 | #ifdef CONFIG_X86_32 |
1285 | |
1286 | static struct resource video_ram_resource = { |
1287 | .name = "Video RAM area" , |
1288 | .start = 0xa0000, |
1289 | .end = 0xbffff, |
1290 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM |
1291 | }; |
1292 | |
1293 | void __init i386_reserve_resources(void) |
1294 | { |
1295 | request_resource(&iomem_resource, &video_ram_resource); |
1296 | reserve_standard_io_resources(); |
1297 | } |
1298 | |
1299 | #endif /* CONFIG_X86_32 */ |
1300 | |
1301 | static struct notifier_block kernel_offset_notifier = { |
1302 | .notifier_call = dump_kernel_offset |
1303 | }; |
1304 | |
1305 | static int __init register_kernel_offset_dumper(void) |
1306 | { |
1307 | atomic_notifier_chain_register(&panic_notifier_list, |
1308 | &kernel_offset_notifier); |
1309 | return 0; |
1310 | } |
1311 | __initcall(register_kernel_offset_dumper); |
1312 | |