/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>

/* Used while preparing memory map entries for the second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * Used to VMCLEAR all the VMCSs loaded on this processor.  The callback
 * function pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
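/* Zero-filled source for the backup segment; see crash_load_segments(). */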
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

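/*
 * Runs on each CPU (other than the crashing one) in NMI context, via
 * nmi_shootdown_cpus().  Saves the CPU's register state for the crash dump
 * and quiesces virtualization, Intel PT tracing and the local APIC.
 */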
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.  Having VMX or SVM
	 * enabled on any CPU may break rebooting after the kdump kernel has
	 * finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging.
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

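/* NMI all other CPUs into kdump_nmi_callback(), then silence this CPU's APIC. */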
void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no CPUs to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system has panicked or is
	 * otherwise in a critical state.  The minimum amount of code to allow
	 * a kexec'd kernel to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other CPUs in an SMP
	 * system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Booting the kdump kernel with VMX or SVM enabled won't work, because
	 * (among other limitations) we can't disable paging with the virt
	 * flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging.
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare ELF headers for RAM regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Excluding the crash region and/or crashk_low_res may each cause a
	 * range to split.  So add two extra slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(sizeof(struct crash_mem) +
			sizeof(struct crash_mem_range) * nr_ranges);
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges (the crashkernel regions) and remove them
 * from the list.  Excluding a range may split an existing one; the
 * resulting pieces end up in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude the crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare ELF headers.  Returns the buffer address and size via addr and sz. */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				  prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64-bit headers */
	ret = crash_prepare_elf64_headers(cmem,
				IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches the backup region, adjust its offset to point
	 * at the backup segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
		    phdr->p_paddr == image->arch.backup_src_start &&
		    phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	vfree(cmem);
	return ret;
}

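/*
 * Append one entry to the e820 table handed to the second kernel in
 * boot_params.  Returns 1 (not an -errno) once the zeropage table is full.
 */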
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

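/*
 * walk_iomem_res_desc() callback: convert each matching resource into an
 * e820 entry of the type requested via crash_memmap_data.
 */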
static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

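/*
 * Start from the [mstart, mend] range and carve out the backup region and
 * the ELF header region; what remains in cmem->ranges[] can safely be
 * handed to the second kernel as RAM.
 */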
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude the backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude the ELF header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare the memory map for the crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add the first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add the crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add the rest to the memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If the entry is smaller than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

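/*
 * walk_system_ram_res() callback: record the first RAM range found as the
 * backup source.  Returning a nonzero value stops the walk.
 */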
static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for the backup region */
	return 1;
}

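/*
 * Load the extra segments a crash dump needs: a zero-filled backup segment
 * (purgatory copies the first 640K into it at crash time) and the ELF core
 * headers describing the old kernel's memory to the dump tools.
 */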
int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for the backup area.  The first 640K
	 * RAM region is the backup source.
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are OK */
	if (ret < 0)
		return ret;

	/* Add the backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for the backup segment; its
		 * contents are copied in by purgatory after the crash.  Just
		 * add a zero-filled segment for now so the checksum logic
		 * works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare the ELF headers and add a segment for them */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */