// SPDX-License-Identifier: GPL-2.0
/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/kstrtox.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
#include <linux/reboot.h>
#include <linux/virtio_anchor.h>
#include <linux/stackprotector.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/cpuid.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
#ifdef CONFIG_X86_IOPL_IOPERM
#include <asm/io_bitmap.h>
#endif

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/proc_cap_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"
#include "pmu.h"

#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */

void *xen_initial_gdt;

static int xen_cpu_up_prepare_pv(unsigned int cpu);
static int xen_cpu_dead_pv(unsigned int cpu);

struct tls_descs {
	struct desc_struct desc[3];
};

DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
DEFINE_PER_CPU(unsigned int, xen_lazy_nesting);

enum xen_lazy_mode xen_get_lazy_mode(void)
{
	if (in_interrupt())
		return XEN_LAZY_NONE;

	return this_cpu_read(xen_lazy_mode);
}
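
/*
 * Illustrative sketch of how the lazy modes are used (the real call
 * sites are xen_start_context_switch()/xen_end_context_switch() below;
 * this is a summary, not additional functionality):
 *
 *	enter_lazy(XEN_LAZY_CPU);
 *	... queue hypercalls via the multicall machinery ...
 *	xen_mc_flush();
 *	leave_lazy(XEN_LAZY_CPU);
 *
 * While a lazy mode is active, xen_mc_issue() defers flushing the batch.
 */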

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed.  Since Xen writes different descriptors than the ones
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);

static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE);

static int __init parse_xen_msr_safe(char *str)
{
	if (str)
		return kstrtobool(str, &xen_msr_safe);
	return -EINVAL;
}
early_param("xen_msr_safe", parse_xen_msr_safe);

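/*
 * Example (kernel command line): "xen_msr_safe=1" selects the
 * fault-tolerant MSR accessors, "xen_msr_safe=0" the potentially
 * crashing ones; any value accepted by kstrtobool() works.
 */
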
/* Get MTRR settings from Xen and put them into mtrr_state. */
static void __init xen_set_mtrr_data(void)
{
#ifdef CONFIG_MTRR
	struct xen_platform_op op = {
		.cmd = XENPF_read_memtype,
		.interface_version = XENPF_INTERFACE_VERSION,
	};
	unsigned int reg;
	unsigned long mask;
	uint32_t eax, width;
	static struct mtrr_var_range var[MTRR_MAX_VAR_RANGES] __initdata;

	/* Get physical address width (only 64-bit cpus supported). */
	width = 36;
	eax = cpuid_eax(0x80000000);
	if ((eax >> 16) == 0x8000 && eax >= 0x80000008) {
		eax = cpuid_eax(0x80000008);
		width = eax & 0xff;
	}

	for (reg = 0; reg < MTRR_MAX_VAR_RANGES; reg++) {
		op.u.read_memtype.reg = reg;
		if (HYPERVISOR_platform_op(&op))
			break;

		/*
		 * Only called in dom0, which has all RAM PFNs mapped at
		 * RAM MFNs, and all PCI space etc. is identity mapped.
		 * This means we can treat MFN == PFN regarding MTRR settings.
		 */
		var[reg].base_lo = op.u.read_memtype.type;
		var[reg].base_lo |= op.u.read_memtype.mfn << PAGE_SHIFT;
		var[reg].base_hi = op.u.read_memtype.mfn >> (32 - PAGE_SHIFT);
		mask = ~((op.u.read_memtype.nr_mfns << PAGE_SHIFT) - 1);
		mask &= (1UL << width) - 1;
		if (mask)
			mask |= MTRR_PHYSMASK_V;
		var[reg].mask_lo = mask;
		var[reg].mask_hi = mask >> 32;
	}

	/* Only overwrite MTRR state if any MTRR could be got from Xen. */
	if (reg)
		mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
#endif
}
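
/*
 * Worked example for the mask computation above: a 16 MiB range
 * (nr_mfns = 0x1000, PAGE_SHIFT = 12) on a CPU with width = 36 gives
 * mask = ~(0x1000000 - 1) & ((1UL << 36) - 1) = 0xfff000000; setting
 * MTRR_PHYSMASK_V (bit 11) then yields mask_lo = 0xff000800 and
 * mask_hi = 0xf.
 */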

static void __init xen_pv_init_platform(void)
{
	/* PV guests can't operate virtio devices without grants. */
	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);

	populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));

	set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);

	/* xen clock uses per-cpu vcpu_info, need to init it for boot cpu */
	xen_vcpu_info_reset(0);

	/* pvclock is in shared info area */
	xen_init_time_ops();

	if (xen_initial_domain())
		xen_set_mtrr_data();
	else
		mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}

static void __init xen_pv_guest_late_init(void)
{
#ifndef CONFIG_SMP
	/* Set up shared vcpu info for non-smp configurations */
	xen_setup_vcpu_info_placement();
#endif
}

static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case CPUID_MWAIT_LEAF:
		/* Synthesize the values.. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
}
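
/*
 * Note: XEN_EMULATE_PREFIX marks the CPUID above as an instruction the
 * hypervisor should trap and emulate, so Xen gets to filter the feature
 * bits a PV guest may see before the masking in xen_cpuid() is applied.
 */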

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.u.set_pminfo.id	= -1,
		.u.set_pminfo.type	= XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/*
	 * We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel to harvest deeper than C3 states from ACPI
	 * _CST using the processor_harvest_xen.c module. For this to work, we
	 * need to gather the MWAIT_LEAF values (which the cstate.c code
	 * checks against). The hypervisor won't expose the MWAIT flag because
	 * it would break backwards compatibility; so we will find out directly
	 * from the hardware and hypercall.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * When running on a platform earlier than Xen 4.2, do not expose
	 * MWAIT, to avoid the risk of loading the native ACPI PAD driver.
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/*
	 * We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx. The hypercall provides only partial information.
	 */

	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/*
	 * Ask the Hypervisor whether to clear ACPI_PROC_CAP_C_C2C3_FFH. If so,
	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PROC_CAP_C_CAPABILITY_SMP | ACPI_PROC_CAP_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_platform_op(&op) == 0) &&
	    (buf[2] & (ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}

static bool __init xen_check_xsave(void)
{
	unsigned int cx, xsave_mask;

	cx = cpuid_ecx(1);

	xsave_mask = (1 << (X86_FEATURE_XSAVE % 32)) |
		     (1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	return (cx & xsave_mask) == xsave_mask;
}

static void __init xen_init_capabilities(void)
{
	setup_force_cpu_cap(X86_FEATURE_XENPV);
	setup_clear_cpu_cap(X86_FEATURE_DCA);
	setup_clear_cpu_cap(X86_FEATURE_APERFMPERF);
	setup_clear_cpu_cap(X86_FEATURE_MTRR);
	setup_clear_cpu_cap(X86_FEATURE_ACC);
	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
	setup_clear_cpu_cap(X86_FEATURE_SME);
	setup_clear_cpu_cap(X86_FEATURE_LKGS);

	/*
	 * Xen PV would need some work to support PCID: CR3 handling as well
	 * as xen_flush_tlb_others() would need updating.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);

	if (!xen_initial_domain())
		setup_clear_cpu_cap(X86_FEATURE_ACPI);

	if (xen_check_mwait())
		setup_force_cpu_cap(X86_FEATURE_MWAIT);
	else
		setup_clear_cpu_cap(X86_FEATURE_MWAIT);

	if (!xen_check_xsave()) {
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
		setup_clear_cpu_cap(X86_FEATURE_OSXSAVE);
	}
}

static noinstr void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static noinstr unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(XEN_LAZY_CPU);
}

static void xen_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	xen_mc_flush();
	leave_lazy(XEN_LAZY_CPU);
	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	unsigned char dummy;
	void *va;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	pte = pfn_pte(pfn, prot);

	/*
	 * Careful: update_va_mapping() will fail if the virtual address
	 * we're poking isn't populated in the page tables.  We don't
	 * need to worry about the direct map (that's always in the page
	 * tables), but we need to be careful about vmap space.  In
	 * particular, the top level page table can lazily propagate
	 * entries between processes, so if we've switched mms since we
	 * vmapped the target in the first place, we might not have the
	 * top-level page table entry populated.
	 *
	 * We disable preemption because we want the same mm active when
	 * we probe the target and when we issue the hypercall.  We'll
	 * have the same nominal mm, but if we're a kernel thread, lazy
	 * mm dropping could change our pgd.
	 *
	 * Out of an abundance of caution, this uses
	 * copy_from_kernel_nofault() to fault in the target address just
	 * in case there's some obscure case in which the target address
	 * isn't readable.
	 */

	preempt_disable();

	copy_from_kernel_nofault(&dummy, v, 1);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	va = __va(PFN_PHYS(pfn));

	if (va != v && HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
		BUG();

	preempt_enable();
}

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	/*
	 * We need to mark all the aliases of the LDT pages RO.  We
	 * don't need to call vm_flush_aliases(), though, since that's
	 * only responsible for flushing aliases out of the TLBs, not the
	 * page tables, and Xen will flush the TLB for us if needed.
	 *
	 * To avoid confusing future readers: none of this is necessary
	 * to load the LDT.  The hypervisor only checks this when the
	 * LDT is faulted in due to subsequent descriptor access.
	 */

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_CPU);
}
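
/*
 * xen_set_ldt() above shows the typical multicall batching pattern used
 * throughout this file (a sketch, not additional functionality):
 *
 *	mcs = xen_mc_entry(sizeof(*op));	reserve slot + argument space
 *	op = mcs.args;				fill in the arguments
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(XEN_LAZY_CPU);		flush now, unless batching
 *						in lazy CPU mode
 */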

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned long pfn, mfn;
	int level;
	pte_t *ptep;
	void *virt;

	/* @size should be at most GDT_SIZE, which is smaller than PAGE_SIZE. */
	BUG_ON(size > PAGE_SIZE);
	BUG_ON(va & ~PAGE_MASK);

	/*
	 * The GDT is per-cpu and is in the percpu data area.
	 * That can be virtually mapped, so we need to do a
	 * page-walk to get the underlying MFN for the
	 * hypercall.  The page can also be in the kernel's
	 * linear range, so we need to RO that mapping too.
	 */
	ptep = lookup_address(va, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	mfn = pfn_to_mfn(pfn);
	virt = __va(PFN_PHYS(pfn));

	make_lowmem_page_readonly((void *)va);
	make_lowmem_page_readonly(virt);

	if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned long pfn, mfn;
	pte_t pte;

	/* @size should be at most GDT_SIZE, which is smaller than PAGE_SIZE. */
	BUG_ON(size > PAGE_SIZE);
	BUG_ON(va & ~PAGE_MASK);

	pfn = virt_to_pfn((void *)va);
	mfn = pfn_to_mfn(pfn);

	pte = pfn_pte(pfn, PAGE_KERNEL_RO);

	if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
		BUG();

	if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
		BUG();
}

static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return !memcmp(d1, d2, sizeof(*d1));
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_rw(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * In lazy mode we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (xen_get_lazy_mode() == XEN_LAZY_CPU)
		loadsegment(fs, 0);

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(XEN_LAZY_CPU);
}
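
/*
 * Note: the three load_TLS_descriptor() calls above queue at most three
 * update_descriptor multicalls and issue them as one batch; a TLS slot
 * that is unchanged (caught by the shadow compare) costs nothing.
 */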

static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

void noist_exc_debug(struct pt_regs *regs);

DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
{
	/* On Xen PV, NMI doesn't use IST.  The C part is the same as native. */
	exc_nmi(regs);
}

DEFINE_IDTENTRY_RAW_ERRORCODE(xenpv_exc_double_fault)
{
	/* On Xen PV, DF doesn't use IST.  The C part is the same as native. */
	exc_double_fault(regs, error_code);
}

DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
{
	/*
	 * There's no IST on Xen PV, but we still need to dispatch
	 * to the correct handler.
	 */
	if (user_mode(regs))
		noist_exc_debug(regs);
	else
		exc_debug(regs);
}

DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
	/* This should never happen and there is no way to handle it. */
	instrumentation_begin();
	pr_err("Unknown trap in Xen PV mode.");
	BUG();
	instrumentation_end();
}

#ifdef CONFIG_X86_MCE
DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
{
	/*
	 * There's no IST on Xen PV, but we still need to dispatch
	 * to the correct handler.
	 */
	if (user_mode(regs))
		noist_exc_machine_check(regs);
	else
		exc_machine_check(regs);
}
#endif

struct trap_array_entry {
	void (*orig)(void);
	void (*xen)(void);
	bool ist_okay;
};

#define TRAP_ENTRY(func, ist_ok) {		\
	.orig		= asm_##func,		\
	.xen		= xen_asm_##func,	\
	.ist_okay	= ist_ok }

#define TRAP_ENTRY_REDIR(func, ist_ok) {	\
	.orig		= asm_##func,		\
	.xen		= xen_asm_xenpv_##func,	\
	.ist_okay	= ist_ok }

static struct trap_array_entry trap_array[] = {
	TRAP_ENTRY_REDIR(exc_debug, true),
	TRAP_ENTRY_REDIR(exc_double_fault, true),
#ifdef CONFIG_X86_MCE
	TRAP_ENTRY_REDIR(exc_machine_check, true),
#endif
	TRAP_ENTRY_REDIR(exc_nmi, true),
	TRAP_ENTRY(exc_int3, false),
	TRAP_ENTRY(exc_overflow, false),
#ifdef CONFIG_IA32_EMULATION
	{ entry_INT80_compat, xen_entry_INT80_compat, false },
#endif
	TRAP_ENTRY(exc_page_fault, false),
	TRAP_ENTRY(exc_divide_error, false),
	TRAP_ENTRY(exc_bounds, false),
	TRAP_ENTRY(exc_invalid_op, false),
	TRAP_ENTRY(exc_device_not_available, false),
	TRAP_ENTRY(exc_coproc_segment_overrun, false),
	TRAP_ENTRY(exc_invalid_tss, false),
	TRAP_ENTRY(exc_segment_not_present, false),
	TRAP_ENTRY(exc_stack_segment, false),
	TRAP_ENTRY(exc_general_protection, false),
	TRAP_ENTRY(exc_spurious_interrupt_bug, false),
	TRAP_ENTRY(exc_coprocessor_error, false),
	TRAP_ENTRY(exc_alignment_check, false),
	TRAP_ENTRY(exc_simd_coprocessor_error, false),
#ifdef CONFIG_X86_CET
	TRAP_ENTRY(exc_control_protection, false),
#endif
};

static bool __ref get_trap_addr(void **addr, unsigned int ist)
{
	unsigned int nr;
	bool ist_okay = false;
	bool found = false;

	/*
	 * Replace trap handler addresses by Xen specific ones.
	 * Check for known traps using IST and whitelist them.
	 * The debugger ones are the only ones we care about.
	 * Xen will handle faults like double_fault, so we should never see
	 * them.  Warn if there's an unexpected IST-using fault handler.
	 */
	for (nr = 0; nr < ARRAY_SIZE(trap_array); nr++) {
		struct trap_array_entry *entry = trap_array + nr;

		if (*addr == entry->orig) {
			*addr = entry->xen;
			ist_okay = entry->ist_okay;
			found = true;
			break;
		}
	}

	if (nr == ARRAY_SIZE(trap_array) &&
	    *addr >= (void *)early_idt_handler_array[0] &&
	    *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
		nr = (*addr - (void *)early_idt_handler_array[0]) /
		     EARLY_IDT_HANDLER_SIZE;
		*addr = (void *)xen_early_idt_handler_array[nr];
		found = true;
	}

	if (!found)
		*addr = (void *)xen_asm_exc_xen_unknown_trap;

	if (WARN_ON(found && ist != 0 && !ist_okay))
		return false;

	return true;
}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->bits.type != GATE_TRAP && val->bits.type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(val);
	if (!get_trap_addr((void **)&addr, val->bits.ist))
		return 0;
	info->address = addr;

	info->cs = gate_segment(val);
	info->flags = val->bits.dpl;
	/* interrupt gates clear IF */
	if (val->bits.type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}
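
/*
 * For reference: in Xen's struct trap_info the low two flag bits carry
 * the descriptor privilege level, and bit 2 asks Xen to disable event
 * delivery on entry (the PV analogue of an interrupt gate clearing IF),
 * which is what the "1 << 2" above requests.
 */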

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/*
 * Set an IDT entry.  If the entry is part of the current IDT, then
 * also update Xen.
 */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
				      struct trap_info *traps, bool full)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
			out++;
	}

	return out;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

	xen_convert_trap_info(desc, traps, true);
}

/*
 * Load a new IDT into Xen.  In principle this can be per-CPU, so we
 * hold a spinlock to protect the static traps[] array (static because
 * it avoids allocation, and saves stack space).
 */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];
	static const struct trap_info zero = { };
	unsigned out;

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

	out = xen_convert_trap_info(desc, traps, false);
	traps[out] = zero;

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/*
 * Write a GDT descriptor entry.  Ignore LDT descriptors, since
 * they're handled differently.
 */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}

static void xen_load_sp0(unsigned long sp0)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
	xen_mc_issue(XEN_LAZY_CPU);
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static void xen_invalidate_io_bitmap(void)
{
	struct physdev_set_iobitmap iobitmap = {
		.bitmap = NULL,
		.nr_ports = 0,
	};

	native_tss_invalidate_io_bitmap();
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
}

static void xen_update_io_bitmap(void)
{
	struct physdev_set_iobitmap iobitmap;
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);

	native_tss_update_io_bitmap();

	iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) +
			  tss->x86_tss.io_bitmap_base;
	if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID)
		iobitmap.nr_ports = 0;
	else
		iobitmap.nr_ports = IO_BITMAP_BITS;

	HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
}
#endif

static void xen_io_delay(void)
{
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/*
	 * Only pay attention to cr0.TS; everything else is
	 * ignored.
	 */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(XEN_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);

	native_write_cr4(cr4);
}
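
/*
 * Note on the masking above: PGE, PSE and PCE are controlled by the
 * hypervisor under Xen PV, so those bits are stripped before the
 * (trapped and emulated) CR4 write rather than letting Xen reject them.
 */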

static u64 xen_do_read_msr(unsigned int msr, int *err)
{
	u64 val = 0;	/* Avoid uninitialized value for safe variant. */

	if (pmu_msr_read(msr, &val, err))
		return val;

	if (err)
		val = native_read_msr_safe(msr, err);
	else
		val = native_read_msr(msr);

	switch (msr) {
	case MSR_IA32_APICBASE:
		val &= ~X2APIC_ENABLE;
		break;
	}
	return val;
}

static void set_seg(unsigned int which, unsigned int low, unsigned int high,
		    int *err)
{
	u64 base = ((u64)high << 32) | low;

	if (HYPERVISOR_set_segment_base(which, base) == 0)
		return;

	if (err)
		*err = -EIO;
	else
		WARN(1, "Xen set_segment_base(%u, %llx) failed\n", which, base);
}

/*
 * Support write_msr_safe() and write_msr() semantics.
 * With err == NULL write_msr() semantics are selected.
 * Supplying an err pointer requires err to be pre-initialized with 0.
 */
static void xen_do_write_msr(unsigned int msr, unsigned int low,
			     unsigned int high, int *err)
{
	switch (msr) {
	case MSR_FS_BASE:
		set_seg(SEGBASE_FS, low, high, err);
		break;

	case MSR_KERNEL_GS_BASE:
		set_seg(SEGBASE_GS_USER, low, high, err);
		break;

	case MSR_GS_BASE:
		set_seg(SEGBASE_GS_KERNEL, low, high, err);
		break;

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/*
		 * Fast syscall setup is all done in hypercalls, so
		 * these are all ignored.  Stub them out here to stop
		 * Xen console noise.
		 */
		break;

	default:
		if (!pmu_msr_write(msr, low, high, err)) {
			if (err)
				*err = native_write_msr_safe(msr, low, high);
			else
				native_write_msr(msr, low, high);
		}
	}
}

static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	return xen_do_read_msr(msr, err);
}

static int xen_write_msr_safe(unsigned int msr, unsigned int low,
			      unsigned int high)
{
	int err = 0;

	xen_do_write_msr(msr, low, high, &err);

	return err;
}

static u64 xen_read_msr(unsigned int msr)
{
	int err;

	return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL);
}

static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
{
	int err;

	xen_do_write_msr(msr, low, high, xen_msr_safe ? &err : NULL);
}
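
/*
 * Summary of the four entry points above: the *_safe() variants always
 * pass an err pointer and therefore use the fault-tolerant native
 * accessors, while xen_read_msr()/xen_write_msr() consult the
 * xen_msr_safe switch to choose between safe and faulting accesses.
 */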

/* This is called once we have the cpu_possible_mask */
void __init xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Set up direct vCPU id mapping for PV guests. */
		per_cpu(xen_vcpu_id, cpu) = cpu;
		xen_vcpu_setup(cpu);
	}

	pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
	pv_ops.irq.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
	pv_ops.irq.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
	pv_ops.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2_direct);
}

static const struct pv_info xen_info __initconst = {
	.extra_user_64bit_cs = FLAT_USER_CS64,
	.name = "Xen",
};

static const typeof(pv_ops) xen_cpu_ops __initconst = {
	.cpu = {
		.cpuid = xen_cpuid,

		.set_debugreg = xen_set_debugreg,
		.get_debugreg = xen_get_debugreg,

		.read_cr0 = xen_read_cr0,
		.write_cr0 = xen_write_cr0,

		.write_cr4 = xen_write_cr4,

		.wbinvd = pv_native_wbinvd,

		.read_msr = xen_read_msr,
		.write_msr = xen_write_msr,

		.read_msr_safe = xen_read_msr_safe,
		.write_msr_safe = xen_write_msr_safe,

		.read_pmc = xen_read_pmc,

		.load_tr_desc = paravirt_nop,
		.set_ldt = xen_set_ldt,
		.load_gdt = xen_load_gdt,
		.load_idt = xen_load_idt,
		.load_tls = xen_load_tls,
		.load_gs_index = xen_load_gs_index,

		.alloc_ldt = xen_alloc_ldt,
		.free_ldt = xen_free_ldt,

		.store_tr = xen_store_tr,

		.write_ldt_entry = xen_write_ldt_entry,
		.write_gdt_entry = xen_write_gdt_entry,
		.write_idt_entry = xen_write_idt_entry,
		.load_sp0 = xen_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
		.invalidate_io_bitmap = xen_invalidate_io_bitmap,
		.update_io_bitmap = xen_update_io_bitmap,
#endif
		.io_delay = xen_io_delay,

		.start_context_switch = xen_start_context_switch,
		.end_context_switch = xen_end_context_switch,
	},
};

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	do_kernel_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

static unsigned char xen_get_nmi_reason(void)
{
	unsigned char reason = 0;

	/* Construct a value which looks like it came from port 0x61. */
	if (test_bit(_XEN_NMIREASON_io_error,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_IOCHK;
	if (test_bit(_XEN_NMIREASON_pci_serr,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_SERR;

	return reason;
}
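
/*
 * (On bare metal these would be NMI_REASON_IOCHK and NMI_REASON_SERR as
 * read from system control port B, I/O port 0x61; the Xen shared-info
 * bits stand in for that register here.)
 */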

static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void __init xen_setup_gdt(int cpu)
{
	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_ops.cpu.load_gdt = xen_load_gdt_boot;

	switch_gdt_and_percpu_base(cpu);

	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
	pv_ops.cpu.load_gdt = xen_load_gdt;
}

static void __init xen_dom0_set_legacy_features(void)
{
	x86_platform.legacy.rtc = 1;
}

static void __init xen_domu_set_legacy_features(void)
{
	x86_platform.legacy.rtc = 0;
}

extern void early_xen_iret_patch(void);

/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
{
	struct physdev_set_iopl set_iopl;
	unsigned long initrd_start = 0;
	int rc;

	if (!si)
		return;

	clear_bss();

	xen_start_info = si;

	__text_gen_insn(&early_xen_iret_patch,
			JMP32_INSN_OPCODE, &early_xen_iret_patch, &xen_iret,
			JMP32_INSN_SIZE);

	xen_domain_type = XEN_PV_DOMAIN;
	xen_start_flags = xen_start_info->flags;

	xen_setup_features();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_ops.cpu = xen_cpu_ops.cpu;
	xen_init_irq_ops();

	/*
	 * Setup xen_vcpu early because it is needed for
	 * local_irq_disable(), irqs_disabled(), e.g. in printk().
	 *
	 * Don't do the full vcpu_info placement stuff until we have
	 * the cpu_possible_mask and a non-dummy shared_info.
	 */
	xen_vcpu_info_reset(0);

	x86_platform.get_nmi_reason = xen_get_nmi_reason;
	x86_platform.realmode_reserve = x86_init_noop;
	x86_platform.realmode_init = x86_init_noop;

	x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.irqs.intr_mode_select = x86_init_noop;
	x86_init.irqs.intr_mode_init = x86_64_probe_apic;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;
	x86_init.hyper.init_platform = xen_pv_init_platform;
	x86_init.hyper.guest_late_init = xen_pv_guest_late_init;

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */

	xen_setup_machphys_mapping();
	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Get mfn list */
	xen_build_dynamic_phys_to_machine();

	/* Work out if we support NX */
	get_cpu_cap(&boot_cpu_data);
	x86_configure_nx();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_gdt(0);

	/* Determine virtual and physical address sizes */
	get_cpu_address_sizes(&boot_cpu_data);

	/* Let's presume PV guests always boot on vCPU with id 0. */
	per_cpu(xen_vcpu_id, 0) = 0;

	idt_setup_early_handler();

	xen_init_capabilities();

	/*
	 * set up the basic apic ops.
	 */
	xen_init_apic();

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless.  Prevent it from trying.
	 */
	disable_srat();
#endif
	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
				   xen_start_info->nr_pages);
	xen_reserve_special_pages();

	/*
	 * We used to do this in xen_arch_setup, but that is too late
	 * on AMD, where early_cpu_init (run before ->arch_setup()) calls
	 * early_amd_init, which pokes the 0xcf8 I/O port.
	 */
	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		xen_raw_printk("physdev_op failed %d\n", rc);

	if (xen_start_info->mod_start) {
		if (xen_start_info->flags & SIF_MOD_START_PFN)
			initrd_start = PFN_PHYS(xen_start_info->mod_start);
		else
			initrd_start = __pa(xen_start_info->mod_start);
	}

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = initrd_start;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
	boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;

	if (!xen_initial_domain()) {
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
		x86_platform.set_legacy_features =
				xen_domu_set_legacy_features;
	} else {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);
		struct xen_platform_op op = {
			.cmd = XENPF_firmware_info,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
		};

		x86_platform.set_legacy_features =
				xen_dom0_set_legacy_features;
		xen_init_vga(info, xen_start_info->console.dom0.info_size,
			     &boot_params.screen_info);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;

		if (HYPERVISOR_platform_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		/* Make sure ACS will be enabled */
		pci_request_acs();

		xen_acpi_sleep_register();

		xen_boot_params_init_edd();

#ifdef CONFIG_ACPI
		/*
		 * Disable selecting "Firmware First mode" for correctable
		 * memory errors, as this is the duty of the hypervisor to
		 * decide.
		 */
		acpi_disable_cmcff = 1;
#endif
	}

	xen_add_preferred_consoles();

#ifdef CONFIG_PCI
	/* PCI BIOS service won't work from a PV guest. */
	pci_probe &= ~PCI_PROBE_BIOS;
#endif
	xen_raw_console_write("about to get started...\n");

	/* We need this for printk timestamps */
	xen_setup_runstate_info(0);

	xen_efi_init(&boot_params);

	/* Start the world */
	cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
}

static int xen_cpu_up_prepare_pv(unsigned int cpu)
{
	int rc;

	if (per_cpu(xen_vcpu, cpu) == NULL)
		return -ENODEV;

	xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
		     cpu, rc);
		return rc;
	}

	rc = xen_smp_intr_init_pv(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init_pv() for CPU %d failed: %d\n",
		     cpu, rc);
		return rc;
	}

	return 0;
}

static int xen_cpu_dead_pv(unsigned int cpu)
{
	xen_smp_intr_free(cpu);
	xen_smp_intr_free_pv(cpu);

	xen_teardown_timer(cpu);

	return 0;
}

static uint32_t __init xen_platform_pv(void)
{
	if (xen_pv_domain())
		return xen_cpuid_base();

	return 0;
}

const __initconst struct hypervisor_x86 x86_hyper_xen_pv = {
	.name			= "Xen PV",
	.detect			= xen_platform_pv,
	.type			= X86_HYPER_XEN_PV,
	.runtime.pin_vcpu	= xen_pin_vcpu,
	.ignore_nopv		= true,
};