1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* Paravirtualization interfaces |
3 | Copyright (C) 2006 Rusty Russell IBM Corporation |
4 | |
5 | |
6 | 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc |
7 | */ |
8 | |
9 | #include <linux/errno.h> |
10 | #include <linux/init.h> |
11 | #include <linux/export.h> |
12 | #include <linux/efi.h> |
13 | #include <linux/bcd.h> |
14 | #include <linux/highmem.h> |
15 | #include <linux/kprobes.h> |
16 | #include <linux/pgtable.h> |
17 | #include <linux/static_call.h> |
18 | |
19 | #include <asm/bug.h> |
20 | #include <asm/paravirt.h> |
21 | #include <asm/debugreg.h> |
22 | #include <asm/desc.h> |
23 | #include <asm/setup.h> |
24 | #include <asm/time.h> |
25 | #include <asm/pgalloc.h> |
26 | #include <asm/irq.h> |
27 | #include <asm/delay.h> |
28 | #include <asm/fixmap.h> |
29 | #include <asm/apic.h> |
30 | #include <asm/tlbflush.h> |
31 | #include <asm/timer.h> |
32 | #include <asm/special_insns.h> |
33 | #include <asm/tlb.h> |
34 | #include <asm/io_bitmap.h> |
35 | #include <asm/gsseg.h> |
36 | |
/*
 * Asm stub that always returns 0 (clears %eax and returns).  Lives in
 * .entry.text so it can be used from entry code.
 */
DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax" , .entry.text);
39 | |
/* Default boot banner: report which paravirt platform (pv_info.name) we run on. */
void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n" ,
	       pv_info.name);
}
45 | |
#ifdef CONFIG_PARAVIRT_XXL
/*
 * Minimal asm implementations of the native primitives used as pv_ops
 * defaults.  The irq/cr2 ones go in .noinstr.text because they may run
 * before instrumentation is permitted.
 */
DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax" , .text);	/* identity: return arg */
DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax" , .noinstr.text);
DEFINE_ASM_FUNC(pv_native_irq_disable, "cli" , .noinstr.text);
DEFINE_ASM_FUNC(pv_native_irq_enable, "sti" , .noinstr.text);
DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax" , .noinstr.text);
#endif
53 | |
/*
 * Defaults to true (virt spinlock path taken); cleared by
 * native_pv_lock_init() when running on bare metal.
 */
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
55 | |
56 | void __init native_pv_lock_init(void) |
57 | { |
58 | if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && |
59 | !boot_cpu_has(X86_FEATURE_HYPERVISOR)) |
60 | static_branch_disable(&virt_spin_lock_key); |
61 | } |
62 | |
/*
 * Native page-table page freeing: no hypervisor bookkeeping required,
 * just hand the page back through the mmu_gather batch.
 *
 * Fix: the call used IDE inlay-hint syntax ("page: table"), which is not
 * valid C and does not compile.
 */
static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	tlb_remove_page(tlb, table);
}
67 | |
/*
 * Steal-time accounting keys; presumably enabled by hypervisor guest code
 * elsewhere — they are never toggled in this file.
 */
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

/* Bare hardware: no hypervisor, hence no stolen time — always 0. */
static u64 native_steal_clock(int cpu)
{
	return 0;
}

/* Static calls default to the native implementations. */
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
78 | |
/* Retarget the pv_sched_clock static call to @func. */
void paravirt_set_sched_clock(u64 (*func)(void))
{
	static_call_update(pv_sched_clock, func);
}
83 | |
/* Resource covering the entire legacy I/O port space; claimed by
 * paravirt_disable_iospace() below.  (Previous comment "These are in
 * entry.S" was stale — this is defined right here.) */
static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport" ,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
91 | |
92 | /* |
93 | * Reserve the whole legacy IO space to prevent any legacy drivers |
94 | * from wasting time probing for their hardware. This is a fairly |
95 | * brute-force approach to disabling all non-virtual drivers. |
96 | * |
97 | * Note that this must be called very early to have any effect. |
98 | */ |
99 | int paravirt_disable_iospace(void) |
100 | { |
101 | return request_resource(root: &ioport_resource, new: &reserve_ioports); |
102 | } |
103 | |
104 | #ifdef CONFIG_PARAVIRT_XXL |
/* Write CR2; noinstr because CR2 handling may run before instrumentation
 * is allowed (CR2 holds the page-fault address). */
static noinstr void pv_native_write_cr2(unsigned long val)
{
	native_write_cr2(val);
}
109 | |
/* Read debug register @regno; thin noinstr wrapper for the pv_ops slot. */
static noinstr unsigned long pv_native_get_debugreg(int regno)
{
	return native_get_debugreg(regno);
}
114 | |
115 | static noinstr void pv_native_set_debugreg(int regno, unsigned long val) |
116 | { |
117 | native_set_debugreg(regno, value: val); |
118 | } |
119 | |
/* Write back and invalidate all caches (WBINVD); non-static — referenced
 * outside this file. */
noinstr void pv_native_wbinvd(void)
{
	native_wbinvd();
}
124 | |
/* Atomically enable interrupts and halt (STI;HLT) via native_safe_halt(). */
static noinstr void pv_native_safe_halt(void)
{
	native_safe_halt();
}
129 | #endif |
130 | |
/* Platform identification; defaults describe bare metal.  Presumably
 * overridden by hypervisor guest setup — not modified in this file. */
struct pv_info pv_info = {
	.name = "bare hardware" ,
#ifdef CONFIG_PARAVIRT_XXL
	.extra_user_64bit_cs = __USER_CS,
#endif
};
137 | |
/* 64-bit pagetable entries: native value/make conversions are the identity
 * function, implemented by the callee-save asm stub _paravirt_ident_64. */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
140 | |
/*
 * Default paravirt operations table: every hook starts out pointing at its
 * native implementation, or at paravirt_nop where bare metal needs no
 * action.  The #ifdef structure must be preserved exactly — which slots
 * exist depends on CONFIG_PARAVIRT_XXL, CONFIG_X86_IOPL_IOPERM,
 * CONFIG_PGTABLE_LEVELS and CONFIG_PARAVIRT_SPINLOCKS/SMP.
 */
struct paravirt_patch_template pv_ops = {
	/* Cpu ops. */
	.cpu.io_delay		= native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
	.cpu.cpuid		= native_cpuid,
	.cpu.get_debugreg	= pv_native_get_debugreg,
	.cpu.set_debugreg	= pv_native_set_debugreg,
	.cpu.read_cr0		= native_read_cr0,
	.cpu.write_cr0		= native_write_cr0,
	.cpu.write_cr4		= native_write_cr4,
	.cpu.wbinvd		= pv_native_wbinvd,
	.cpu.read_msr		= native_read_msr,
	.cpu.write_msr		= native_write_msr,
	.cpu.read_msr_safe	= native_read_msr_safe,
	.cpu.write_msr_safe	= native_write_msr_safe,
	.cpu.read_pmc		= native_read_pmc,
	.cpu.load_tr_desc	= native_load_tr_desc,
	.cpu.set_ldt		= native_set_ldt,
	.cpu.load_gdt		= native_load_gdt,
	.cpu.load_idt		= native_load_idt,
	.cpu.store_tr		= native_store_tr,
	.cpu.load_tls		= native_load_tls,
	.cpu.load_gs_index	= native_load_gs_index,
	.cpu.write_ldt_entry	= native_write_ldt_entry,
	.cpu.write_gdt_entry	= native_write_gdt_entry,
	.cpu.write_idt_entry	= native_write_idt_entry,

	/* LDT alloc/free need no native bookkeeping. */
	.cpu.alloc_ldt		= paravirt_nop,
	.cpu.free_ldt		= paravirt_nop,

	.cpu.load_sp0		= native_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.invalidate_io_bitmap	= native_tss_invalidate_io_bitmap,
	.cpu.update_io_bitmap		= native_tss_update_io_bitmap,
#endif

	.cpu.start_context_switch	= paravirt_nop,
	.cpu.end_context_switch		= paravirt_nop,

	/* Irq ops. */
	.irq.save_fl		= __PV_IS_CALLEE_SAVE(pv_native_save_fl),
	.irq.irq_disable	= __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
	.irq.irq_enable		= __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
	.irq.safe_halt		= pv_native_safe_halt,
	.irq.halt		= native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

	/* Mmu ops. */
	.mmu.flush_tlb_user	= native_flush_tlb_local,
	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
	.mmu.flush_tlb_multi	= native_flush_tlb_multi,
	.mmu.tlb_remove_table	= native_tlb_remove_table,

	.mmu.exit_mmap		= paravirt_nop,
	.mmu.notify_page_enc_status_changed	= paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
	.mmu.read_cr2		= __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
	.mmu.write_cr2		= pv_native_write_cr2,
	.mmu.read_cr3		= __native_read_cr3,
	.mmu.write_cr3		= native_write_cr3,

	.mmu.pgd_alloc		= __paravirt_pgd_alloc,
	.mmu.pgd_free		= paravirt_nop,

	/* Page-table page alloc/release need no native bookkeeping. */
	.mmu.alloc_pte		= paravirt_nop,
	.mmu.alloc_pmd		= paravirt_nop,
	.mmu.alloc_pud		= paravirt_nop,
	.mmu.alloc_p4d		= paravirt_nop,
	.mmu.release_pte	= paravirt_nop,
	.mmu.release_pmd	= paravirt_nop,
	.mmu.release_pud	= paravirt_nop,
	.mmu.release_p4d	= paravirt_nop,

	.mmu.set_pte		= native_set_pte,
	.mmu.set_pmd		= native_set_pmd,

	.mmu.ptep_modify_prot_start	= __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit	= __ptep_modify_prot_commit,

	.mmu.set_pud		= native_set_pud,

	/* Native pte/pmd/pud/pgd value conversions are the identity. */
	.mmu.pmd_val		= PTE_IDENT,
	.mmu.make_pmd		= PTE_IDENT,

	.mmu.pud_val		= PTE_IDENT,
	.mmu.make_pud		= PTE_IDENT,

	.mmu.set_p4d		= native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val		= PTE_IDENT,
	.mmu.make_p4d		= PTE_IDENT,

	.mmu.set_pgd		= native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

	.mmu.pte_val		= PTE_IDENT,
	.mmu.pgd_val		= PTE_IDENT,

	.mmu.make_pte		= PTE_IDENT,
	.mmu.make_pgd		= PTE_IDENT,

	.mmu.enter_mmap		= paravirt_nop,

	.mmu.lazy_mode = {
		.enter		= paravirt_nop,
		.leave		= paravirt_nop,
		.flush		= paravirt_nop,
	},

	.mmu.set_fixmap		= native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath	= native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock	=
				PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait			= paravirt_nop,
	.lock.kick			= paravirt_nop,
	.lock.vcpu_is_preempted		=
				PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};
271 | |
#ifdef CONFIG_PARAVIRT_XXL
/* Keep kprobes off native_load_idt. */
NOKPROBE_SYMBOL(native_load_idt);
#endif

EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);
278 | |