1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _ASM_X86_PARAVIRT_H |
3 | #define _ASM_X86_PARAVIRT_H |
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */
6 | |
7 | #include <asm/paravirt_types.h> |
8 | |
9 | #ifdef CONFIG_PARAVIRT |
10 | #include <asm/pgtable_types.h> |
11 | #include <asm/asm.h> |
12 | #include <asm/nospec-branch.h> |
13 | |
14 | #ifndef __ASSEMBLY__ |
15 | #include <linux/bug.h> |
16 | #include <linux/types.h> |
17 | #include <linux/cpumask.h> |
18 | #include <linux/static_call_types.h> |
19 | #include <asm/frame.h> |
20 | |
21 | u64 dummy_steal_clock(int cpu); |
22 | u64 dummy_sched_clock(void); |
23 | |
24 | DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); |
25 | DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock); |
26 | |
27 | void paravirt_set_sched_clock(u64 (*func)(void)); |
28 | |
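/*
 * Both clock hooks are static calls, so the common case costs a single
 * direct call; a hypervisor installs its own implementation via
 * paravirt_set_sched_clock() (or by updating pv_steal_clock).
 */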
29 | static __always_inline u64 paravirt_sched_clock(void) |
30 | { |
31 | return static_call(pv_sched_clock)(); |
32 | } |
33 | |
34 | struct static_key; |
35 | extern struct static_key paravirt_steal_enabled; |
36 | extern struct static_key paravirt_steal_rq_enabled; |
37 | |
38 | __visible void __native_queued_spin_unlock(struct qspinlock *lock); |
39 | bool pv_is_native_spin_unlock(void); |
40 | __visible bool __native_vcpu_is_preempted(long cpu); |
41 | bool pv_is_native_vcpu_is_preempted(void); |
42 | |
43 | static inline u64 paravirt_steal_clock(int cpu) |
44 | { |
45 | return static_call(pv_steal_clock)(cpu); |
46 | } |
47 | |
48 | #ifdef CONFIG_PARAVIRT_SPINLOCKS |
49 | void __init paravirt_set_cap(void); |
50 | #endif |
51 | |
52 | /* The paravirtualized I/O functions */ |
53 | static inline void slow_down_io(void) |
54 | { |
55 | PVOP_VCALL0(cpu.io_delay); |
56 | #ifdef REALLY_SLOW_IO |
57 | PVOP_VCALL0(cpu.io_delay); |
58 | PVOP_VCALL0(cpu.io_delay); |
59 | PVOP_VCALL0(cpu.io_delay); |
60 | #endif |
61 | } |
62 | |
63 | void native_flush_tlb_local(void); |
64 | void native_flush_tlb_global(void); |
65 | void native_flush_tlb_one_user(unsigned long addr); |
66 | void native_flush_tlb_multi(const struct cpumask *cpumask, |
67 | const struct flush_tlb_info *info); |
68 | |
69 | static inline void __flush_tlb_local(void) |
70 | { |
71 | PVOP_VCALL0(mmu.flush_tlb_user); |
72 | } |
73 | |
74 | static inline void __flush_tlb_global(void) |
75 | { |
76 | PVOP_VCALL0(mmu.flush_tlb_kernel); |
77 | } |
78 | |
79 | static inline void __flush_tlb_one_user(unsigned long addr) |
80 | { |
81 | PVOP_VCALL1(mmu.flush_tlb_one_user, addr); |
82 | } |
83 | |
84 | static inline void __flush_tlb_multi(const struct cpumask *cpumask, |
85 | const struct flush_tlb_info *info) |
86 | { |
87 | PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info); |
88 | } |
89 | |
90 | static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) |
91 | { |
92 | PVOP_VCALL2(mmu.tlb_remove_table, tlb, table); |
93 | } |
94 | |
95 | static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) |
96 | { |
97 | PVOP_VCALL1(mmu.exit_mmap, mm); |
98 | } |
99 | |
100 | static inline void notify_page_enc_status_changed(unsigned long pfn, |
101 | int npages, bool enc) |
102 | { |
103 | PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc); |
104 | } |
105 | |
106 | #ifdef CONFIG_PARAVIRT_XXL |
107 | static inline void load_sp0(unsigned long sp0) |
108 | { |
109 | PVOP_VCALL1(cpu.load_sp0, sp0); |
110 | } |
111 | |
112 | /* The paravirtualized CPUID instruction. */ |
113 | static inline void __cpuid(unsigned int *eax, unsigned int *ebx, |
114 | unsigned int *ecx, unsigned int *edx) |
115 | { |
116 | PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx); |
117 | } |
118 | |
119 | /* |
120 | * These special macros can be used to get or set a debugging register |
121 | */ |
122 | static __always_inline unsigned long paravirt_get_debugreg(int reg) |
123 | { |
124 | return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg); |
125 | } |
126 | #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg) |
127 | static __always_inline void set_debugreg(unsigned long val, int reg) |
128 | { |
129 | PVOP_VCALL2(cpu.set_debugreg, reg, val); |
130 | } |
131 | |
132 | static inline unsigned long read_cr0(void) |
133 | { |
134 | return PVOP_CALL0(unsigned long, cpu.read_cr0); |
135 | } |
136 | |
137 | static inline void write_cr0(unsigned long x) |
138 | { |
139 | PVOP_VCALL1(cpu.write_cr0, x); |
140 | } |
141 | |
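/*
 * The PVOP_ALT_*() variants carry the native instruction sequence as an
 * ALTERNATIVE: everywhere except Xen PV (ALT_NOT(X86_FEATURE_XENPV))
 * the call is patched out and the native code (here "mov %cr2, %rax")
 * is inlined at the call site.
 */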
142 | static __always_inline unsigned long read_cr2(void) |
143 | { |
144 | return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2, |
145 | "mov %%cr2, %%rax;" , |
146 | ALT_NOT(X86_FEATURE_XENPV)); |
147 | } |
148 | |
149 | static __always_inline void write_cr2(unsigned long x) |
150 | { |
151 | PVOP_VCALL1(mmu.write_cr2, x); |
152 | } |
153 | |
154 | static inline unsigned long __read_cr3(void) |
155 | { |
156 | return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3, |
157 | "mov %%cr3, %%rax;" , ALT_NOT(X86_FEATURE_XENPV)); |
158 | } |
159 | |
160 | static inline void write_cr3(unsigned long x) |
161 | { |
162 | PVOP_ALT_VCALL1(mmu.write_cr3, x, |
163 | "mov %%rdi, %%cr3" , ALT_NOT(X86_FEATURE_XENPV)); |
164 | } |
165 | |
166 | static inline void __write_cr4(unsigned long x) |
167 | { |
168 | PVOP_VCALL1(cpu.write_cr4, x); |
169 | } |
170 | |
171 | static __always_inline void arch_safe_halt(void) |
172 | { |
173 | PVOP_VCALL0(irq.safe_halt); |
174 | } |
175 | |
176 | static inline void halt(void) |
177 | { |
178 | PVOP_VCALL0(irq.halt); |
179 | } |
180 | |
181 | extern noinstr void pv_native_wbinvd(void); |
182 | |
183 | static __always_inline void wbinvd(void) |
184 | { |
	PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
186 | } |
187 | |
188 | static inline u64 paravirt_read_msr(unsigned msr) |
189 | { |
190 | return PVOP_CALL1(u64, cpu.read_msr, msr); |
191 | } |
192 | |
193 | static inline void paravirt_write_msr(unsigned msr, |
194 | unsigned low, unsigned high) |
195 | { |
196 | PVOP_VCALL3(cpu.write_msr, msr, low, high); |
197 | } |
198 | |
199 | static inline u64 paravirt_read_msr_safe(unsigned msr, int *err) |
200 | { |
201 | return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err); |
202 | } |
203 | |
204 | static inline int paravirt_write_msr_safe(unsigned msr, |
205 | unsigned low, unsigned high) |
206 | { |
207 | return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high); |
208 | } |
209 | |
210 | #define rdmsr(msr, val1, val2) \ |
211 | do { \ |
212 | u64 _l = paravirt_read_msr(msr); \ |
213 | val1 = (u32)_l; \ |
214 | val2 = _l >> 32; \ |
215 | } while (0) |
216 | |
217 | #define wrmsr(msr, val1, val2) \ |
218 | do { \ |
219 | paravirt_write_msr(msr, val1, val2); \ |
220 | } while (0) |
221 | |
222 | #define rdmsrl(msr, val) \ |
223 | do { \ |
224 | val = paravirt_read_msr(msr); \ |
225 | } while (0) |
226 | |
227 | static inline void wrmsrl(unsigned msr, u64 val) |
228 | { |
229 | wrmsr(msr, (u32)val, (u32)(val>>32)); |
230 | } |
231 | |
232 | #define wrmsr_safe(msr, a, b) paravirt_write_msr_safe(msr, a, b) |
233 | |
234 | /* rdmsr with exception handling */ |
235 | #define rdmsr_safe(msr, a, b) \ |
236 | ({ \ |
237 | int _err; \ |
238 | u64 _l = paravirt_read_msr_safe(msr, &_err); \ |
239 | (*a) = (u32)_l; \ |
240 | (*b) = _l >> 32; \ |
241 | _err; \ |
242 | }) |
243 | |
244 | static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) |
245 | { |
246 | int err; |
247 | |
	*p = paravirt_read_msr_safe(msr, &err);
249 | return err; |
250 | } |
251 | |
252 | static inline unsigned long long paravirt_read_pmc(int counter) |
253 | { |
254 | return PVOP_CALL1(u64, cpu.read_pmc, counter); |
255 | } |
256 | |
257 | #define rdpmc(counter, low, high) \ |
258 | do { \ |
259 | u64 _l = paravirt_read_pmc(counter); \ |
260 | low = (u32)_l; \ |
261 | high = _l >> 32; \ |
262 | } while (0) |
263 | |
264 | #define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter)) |
265 | |
266 | static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) |
267 | { |
268 | PVOP_VCALL2(cpu.alloc_ldt, ldt, entries); |
269 | } |
270 | |
271 | static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) |
272 | { |
273 | PVOP_VCALL2(cpu.free_ldt, ldt, entries); |
274 | } |
275 | |
276 | static inline void load_TR_desc(void) |
277 | { |
278 | PVOP_VCALL0(cpu.load_tr_desc); |
279 | } |
280 | static inline void load_gdt(const struct desc_ptr *dtr) |
281 | { |
282 | PVOP_VCALL1(cpu.load_gdt, dtr); |
283 | } |
284 | static inline void load_idt(const struct desc_ptr *dtr) |
285 | { |
286 | PVOP_VCALL1(cpu.load_idt, dtr); |
287 | } |
288 | static inline void set_ldt(const void *addr, unsigned entries) |
289 | { |
290 | PVOP_VCALL2(cpu.set_ldt, addr, entries); |
291 | } |
292 | static inline unsigned long paravirt_store_tr(void) |
293 | { |
294 | return PVOP_CALL0(unsigned long, cpu.store_tr); |
295 | } |
296 | |
297 | #define store_tr(tr) ((tr) = paravirt_store_tr()) |
298 | static inline void load_TLS(struct thread_struct *t, unsigned cpu) |
299 | { |
300 | PVOP_VCALL2(cpu.load_tls, t, cpu); |
301 | } |
302 | |
303 | static inline void load_gs_index(unsigned int gs) |
304 | { |
305 | PVOP_VCALL1(cpu.load_gs_index, gs); |
306 | } |
307 | |
308 | static inline void write_ldt_entry(struct desc_struct *dt, int entry, |
309 | const void *desc) |
310 | { |
311 | PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc); |
312 | } |
313 | |
314 | static inline void write_gdt_entry(struct desc_struct *dt, int entry, |
315 | void *desc, int type) |
316 | { |
317 | PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type); |
318 | } |
319 | |
320 | static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) |
321 | { |
322 | PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g); |
323 | } |
324 | |
325 | #ifdef CONFIG_X86_IOPL_IOPERM |
326 | static inline void tss_invalidate_io_bitmap(void) |
327 | { |
328 | PVOP_VCALL0(cpu.invalidate_io_bitmap); |
329 | } |
330 | |
331 | static inline void tss_update_io_bitmap(void) |
332 | { |
333 | PVOP_VCALL0(cpu.update_io_bitmap); |
334 | } |
335 | #endif |
336 | |
337 | static inline void paravirt_enter_mmap(struct mm_struct *next) |
338 | { |
339 | PVOP_VCALL1(mmu.enter_mmap, next); |
340 | } |
341 | |
342 | static inline int paravirt_pgd_alloc(struct mm_struct *mm) |
343 | { |
344 | return PVOP_CALL1(int, mmu.pgd_alloc, mm); |
345 | } |
346 | |
347 | static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) |
348 | { |
349 | PVOP_VCALL2(mmu.pgd_free, mm, pgd); |
350 | } |
351 | |
352 | static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) |
353 | { |
354 | PVOP_VCALL2(mmu.alloc_pte, mm, pfn); |
355 | } |
356 | static inline void paravirt_release_pte(unsigned long pfn) |
357 | { |
358 | PVOP_VCALL1(mmu.release_pte, pfn); |
359 | } |
360 | |
361 | static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) |
362 | { |
363 | PVOP_VCALL2(mmu.alloc_pmd, mm, pfn); |
364 | } |
365 | |
366 | static inline void paravirt_release_pmd(unsigned long pfn) |
367 | { |
368 | PVOP_VCALL1(mmu.release_pmd, pfn); |
369 | } |
370 | |
371 | static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) |
372 | { |
373 | PVOP_VCALL2(mmu.alloc_pud, mm, pfn); |
374 | } |
375 | static inline void paravirt_release_pud(unsigned long pfn) |
376 | { |
377 | PVOP_VCALL1(mmu.release_pud, pfn); |
378 | } |
379 | |
380 | static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) |
381 | { |
382 | PVOP_VCALL2(mmu.alloc_p4d, mm, pfn); |
383 | } |
384 | |
385 | static inline void paravirt_release_p4d(unsigned long pfn) |
386 | { |
387 | PVOP_VCALL1(mmu.release_p4d, pfn); |
388 | } |
389 | |
390 | static inline pte_t __pte(pteval_t val) |
391 | { |
392 | return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val, |
393 | "mov %%rdi, %%rax" , |
394 | ALT_NOT(X86_FEATURE_XENPV)) }; |
395 | } |
396 | |
397 | static inline pteval_t pte_val(pte_t pte) |
398 | { |
399 | return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte, |
400 | "mov %%rdi, %%rax" , ALT_NOT(X86_FEATURE_XENPV)); |
401 | } |
402 | |
403 | static inline pgd_t __pgd(pgdval_t val) |
404 | { |
405 | return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val, |
406 | "mov %%rdi, %%rax" , |
407 | ALT_NOT(X86_FEATURE_XENPV)) }; |
408 | } |
409 | |
410 | static inline pgdval_t pgd_val(pgd_t pgd) |
411 | { |
412 | return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd, |
413 | "mov %%rdi, %%rax" , ALT_NOT(X86_FEATURE_XENPV)); |
414 | } |
415 | |
416 | #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION |
417 | static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, |
418 | pte_t *ptep) |
419 | { |
420 | pteval_t ret; |
421 | |
422 | ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep); |
423 | |
424 | return (pte_t) { .pte = ret }; |
425 | } |
426 | |
427 | static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, |
428 | pte_t *ptep, pte_t old_pte, pte_t pte) |
429 | { |
430 | |
431 | PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte); |
432 | } |
433 | |
434 | static inline void set_pte(pte_t *ptep, pte_t pte) |
435 | { |
436 | PVOP_VCALL2(mmu.set_pte, ptep, pte.pte); |
437 | } |
438 | |
439 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) |
440 | { |
441 | PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd)); |
442 | } |
443 | |
444 | static inline pmd_t __pmd(pmdval_t val) |
445 | { |
446 | return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val, |
447 | "mov %%rdi, %%rax" , |
448 | ALT_NOT(X86_FEATURE_XENPV)) }; |
449 | } |
450 | |
451 | static inline pmdval_t pmd_val(pmd_t pmd) |
452 | { |
453 | return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd, |
454 | "mov %%rdi, %%rax" , ALT_NOT(X86_FEATURE_XENPV)); |
455 | } |
456 | |
457 | static inline void set_pud(pud_t *pudp, pud_t pud) |
458 | { |
459 | PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud)); |
460 | } |
461 | |
462 | static inline pud_t __pud(pudval_t val) |
463 | { |
464 | pudval_t ret; |
465 | |
466 | ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val, |
467 | "mov %%rdi, %%rax" , ALT_NOT(X86_FEATURE_XENPV)); |
468 | |
469 | return (pud_t) { ret }; |
470 | } |
471 | |
472 | static inline pudval_t pud_val(pud_t pud) |
473 | { |
474 | return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud, |
475 | "mov %%rdi, %%rax" , ALT_NOT(X86_FEATURE_XENPV)); |
476 | } |
477 | |
478 | static inline void pud_clear(pud_t *pudp) |
479 | { |
	set_pud(pudp, native_make_pud(0));
481 | } |
482 | |
483 | static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) |
484 | { |
485 | p4dval_t val = native_p4d_val(p4d); |
486 | |
487 | PVOP_VCALL2(mmu.set_p4d, p4dp, val); |
488 | } |
489 | |
490 | #if CONFIG_PGTABLE_LEVELS >= 5 |
491 | |
492 | static inline p4d_t __p4d(p4dval_t val) |
493 | { |
494 | p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val, |
495 | "mov %%rdi, %%rax" , |
496 | ALT_NOT(X86_FEATURE_XENPV)); |
497 | |
498 | return (p4d_t) { ret }; |
499 | } |
500 | |
501 | static inline p4dval_t p4d_val(p4d_t p4d) |
502 | { |
503 | return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d, |
504 | "mov %%rdi, %%rax" , ALT_NOT(X86_FEATURE_XENPV)); |
505 | } |
506 | |
507 | static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd) |
508 | { |
509 | PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd)); |
510 | } |
511 | |
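/*
 * With CONFIG_PGTABLE_LEVELS >= 5 the kernel can still boot on hardware
 * without LA57; pgtable_l5_enabled() is the runtime check. With 4-level
 * paging the pgd is folded into the p4d, so set_pgd() must forward to
 * set_p4d().
 */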
512 | #define set_pgd(pgdp, pgdval) do { \ |
513 | if (pgtable_l5_enabled()) \ |
514 | __set_pgd(pgdp, pgdval); \ |
515 | else \ |
516 | set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd }); \ |
517 | } while (0) |
518 | |
519 | #define pgd_clear(pgdp) do { \ |
520 | if (pgtable_l5_enabled()) \ |
521 | set_pgd(pgdp, native_make_pgd(0)); \ |
522 | } while (0) |
523 | |
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */
525 | |
526 | static inline void p4d_clear(p4d_t *p4dp) |
527 | { |
	set_p4d(p4dp, native_make_p4d(0));
529 | } |
530 | |
531 | static inline void set_pte_atomic(pte_t *ptep, pte_t pte) |
532 | { |
533 | set_pte(ptep, pte); |
534 | } |
535 | |
536 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
537 | pte_t *ptep) |
538 | { |
	set_pte(ptep, native_make_pte(0));
540 | } |
541 | |
542 | static inline void pmd_clear(pmd_t *pmdp) |
543 | { |
	set_pmd(pmdp, native_make_pmd(0));
545 | } |
546 | |
547 | #define __HAVE_ARCH_START_CONTEXT_SWITCH |
548 | static inline void arch_start_context_switch(struct task_struct *prev) |
549 | { |
550 | PVOP_VCALL1(cpu.start_context_switch, prev); |
551 | } |
552 | |
553 | static inline void arch_end_context_switch(struct task_struct *next) |
554 | { |
555 | PVOP_VCALL1(cpu.end_context_switch, next); |
556 | } |
557 | |
558 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE |
559 | static inline void arch_enter_lazy_mmu_mode(void) |
560 | { |
561 | PVOP_VCALL0(mmu.lazy_mode.enter); |
562 | } |
563 | |
564 | static inline void arch_leave_lazy_mmu_mode(void) |
565 | { |
566 | PVOP_VCALL0(mmu.lazy_mode.leave); |
567 | } |
568 | |
569 | static inline void arch_flush_lazy_mmu_mode(void) |
570 | { |
571 | PVOP_VCALL0(mmu.lazy_mode.flush); |
572 | } |
573 | |
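/*
 * set_fixmap is not performance critical, so this is a plain indirect
 * call through pv_ops rather than a patched PVOP_*() call site.
 */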
574 | static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, |
575 | phys_addr_t phys, pgprot_t flags) |
576 | { |
577 | pv_ops.mmu.set_fixmap(idx, phys, flags); |
578 | } |
#endif /* CONFIG_PARAVIRT_XXL */
580 | |
581 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) |
582 | |
583 | static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, |
584 | u32 val) |
585 | { |
586 | PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val); |
587 | } |
588 | |
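/*
 * If the host does not advertise X86_FEATURE_PVUNLOCK, the callee-save
 * call below is patched into the native unlock: a single byte store of
 * zero to the lock word.
 */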
589 | static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock) |
590 | { |
591 | PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock, |
592 | "movb $0, (%%" _ASM_ARG1 ");" , |
593 | ALT_NOT(X86_FEATURE_PVUNLOCK)); |
594 | } |
595 | |
596 | static __always_inline void pv_wait(u8 *ptr, u8 val) |
597 | { |
598 | PVOP_VCALL2(lock.wait, ptr, val); |
599 | } |
600 | |
601 | static __always_inline void pv_kick(int cpu) |
602 | { |
603 | PVOP_VCALL1(lock.kick, cpu); |
604 | } |
605 | |
606 | static __always_inline bool pv_vcpu_is_preempted(long cpu) |
607 | { |
608 | return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu, |
609 | "xor %%" _ASM_AX ", %%" _ASM_AX ";" , |
610 | ALT_NOT(X86_FEATURE_VCPUPREEMPT)); |
611 | } |
612 | |
613 | void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock); |
614 | bool __raw_callee_save___native_vcpu_is_preempted(long cpu); |
615 | |
616 | #endif /* SMP && PARAVIRT_SPINLOCKS */ |
617 | |
618 | #ifdef CONFIG_X86_32 |
619 | /* save and restore all caller-save registers, except return value */ |
620 | #define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;" |
621 | #define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;" |
622 | #else |
623 | /* save and restore all caller-save registers, except return value */ |
624 | #define PV_SAVE_ALL_CALLER_REGS \ |
625 | "push %rcx;" \ |
626 | "push %rdx;" \ |
627 | "push %rsi;" \ |
628 | "push %rdi;" \ |
629 | "push %r8;" \ |
630 | "push %r9;" \ |
631 | "push %r10;" \ |
632 | "push %r11;" |
633 | #define PV_RESTORE_ALL_CALLER_REGS \ |
634 | "pop %r11;" \ |
635 | "pop %r10;" \ |
636 | "pop %r9;" \ |
637 | "pop %r8;" \ |
638 | "pop %rdi;" \ |
639 | "pop %rsi;" \ |
640 | "pop %rdx;" \ |
641 | "pop %rcx;" |
642 | #endif |
643 | |
644 | /* |
645 | * Generate a thunk around a function which saves all caller-save |
646 | * registers except for the return value. This allows C functions to |
647 | * be called from assembler code where fewer than normal registers are |
648 | * available. It may also help code generation around calls from C |
649 | * code if the common case doesn't use many registers. |
650 | * |
651 | * When a callee is wrapped in a thunk, the caller can assume that all |
652 | * arg regs and all scratch registers are preserved across the |
653 | * call. The return value in rax/eax will not be saved, even for void |
654 | * functions. |
655 | */ |
656 | #define PV_THUNK_NAME(func) "__raw_callee_save_" #func |
657 | #define __PV_CALLEE_SAVE_REGS_THUNK(func, section) \ |
658 | extern typeof(func) __raw_callee_save_##func; \ |
659 | \ |
660 | asm(".pushsection " section ", \"ax\";" \ |
661 | ".globl " PV_THUNK_NAME(func) ";" \ |
662 | ".type " PV_THUNK_NAME(func) ", @function;" \ |
663 | ASM_FUNC_ALIGN \ |
664 | PV_THUNK_NAME(func) ":" \ |
665 | ASM_ENDBR \ |
666 | FRAME_BEGIN \ |
667 | PV_SAVE_ALL_CALLER_REGS \ |
668 | "call " #func ";" \ |
669 | PV_RESTORE_ALL_CALLER_REGS \ |
670 | FRAME_END \ |
671 | ASM_RET \ |
672 | ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \ |
673 | ".popsection") |
674 | |
675 | #define PV_CALLEE_SAVE_REGS_THUNK(func) \ |
676 | __PV_CALLEE_SAVE_REGS_THUNK(func, ".text") |
677 | |
678 | /* Get a reference to a callee-save function */ |
679 | #define PV_CALLEE_SAVE(func) \ |
680 | ((struct paravirt_callee_save) { __raw_callee_save_##func }) |
681 | |
682 | /* Promise that "func" already uses the right calling convention */ |
683 | #define __PV_IS_CALLEE_SAVE(func) \ |
684 | ((struct paravirt_callee_save) { func }) |
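
/*
 * Typical use, e.g. for the native queued spinlock unlock (illustrative
 * sketch):
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 *
 *	pv_ops.lock.queued_spin_unlock =
 *			PV_CALLEE_SAVE(__native_queued_spin_unlock);
 */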
685 | |
686 | #ifdef CONFIG_PARAVIRT_XXL |
687 | static __always_inline unsigned long arch_local_save_flags(void) |
688 | { |
	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
691 | } |
692 | |
693 | static __always_inline void arch_local_irq_disable(void) |
694 | { |
695 | PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;" , ALT_NOT(X86_FEATURE_XENPV)); |
696 | } |
697 | |
698 | static __always_inline void arch_local_irq_enable(void) |
699 | { |
700 | PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;" , ALT_NOT(X86_FEATURE_XENPV)); |
701 | } |
702 | |
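/*
 * Note that save and disable below are two separate pv calls; an
 * interrupt can fire in between, which is harmless because the flags
 * saved beforehand are the ones that get restored later.
 */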
703 | static __always_inline unsigned long arch_local_irq_save(void) |
704 | { |
705 | unsigned long f; |
706 | |
707 | f = arch_local_save_flags(); |
708 | arch_local_irq_disable(); |
709 | return f; |
710 | } |
#endif /* CONFIG_PARAVIRT_XXL */
712 | |
713 | |
714 | /* Make sure as little as possible of this mess escapes. */ |
715 | #undef PARAVIRT_CALL |
716 | #undef __PVOP_CALL |
717 | #undef __PVOP_VCALL |
718 | #undef PVOP_VCALL0 |
719 | #undef PVOP_CALL0 |
720 | #undef PVOP_VCALL1 |
721 | #undef PVOP_CALL1 |
722 | #undef PVOP_VCALL2 |
723 | #undef PVOP_CALL2 |
724 | #undef PVOP_VCALL3 |
725 | #undef PVOP_CALL3 |
726 | #undef PVOP_VCALL4 |
727 | #undef PVOP_CALL4 |
728 | |
729 | #define DEFINE_PARAVIRT_ASM(func, instr, sec) \ |
730 | asm (".pushsection " #sec ", \"ax\"\n" \ |
731 | ".global " #func "\n\t" \ |
732 | ".type " #func ", @function\n\t" \ |
733 | ASM_FUNC_ALIGN "\n" \ |
734 | #func ":\n\t" \ |
735 | ASM_ENDBR \ |
736 | instr "\n\t" \ |
737 | ASM_RET \ |
738 | ".size " #func ", . - " #func "\n\t" \ |
739 | ".popsection") |
740 | |
741 | extern void default_banner(void); |
742 | void native_pv_lock_init(void) __init; |
743 | |
744 | #else /* __ASSEMBLY__ */ |
745 | |
746 | #define _PVSITE(ptype, ops, word, algn) \ |
747 | 771:; \ |
748 | ops; \ |
749 | 772:; \ |
750 | .pushsection .parainstructions,"a"; \ |
751 | .align algn; \ |
752 | word 771b; \ |
753 | .byte ptype; \ |
754 | .byte 772b-771b; \ |
755 | _ASM_ALIGN; \ |
756 | .popsection |
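
/*
 * Each _PVSITE use emits one patch site record into .parainstructions:
 * the address of the patchable region (771b), the pv_ops entry type,
 * and the region's length in bytes (772b - 771b).
 */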
757 | |
758 | |
759 | #ifdef CONFIG_X86_64 |
760 | #ifdef CONFIG_PARAVIRT_XXL |
761 | |
762 | #define PARA_PATCH(off) ((off) / 8) |
763 | #define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8) |
764 | #define PARA_INDIRECT(addr) *addr(%rip) |
765 | |
766 | #ifdef CONFIG_DEBUG_ENTRY |
767 | .macro PARA_IRQ_save_fl |
768 | PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), |
769 | ANNOTATE_RETPOLINE_SAFE; |
770 | call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);) |
771 | .endm |
772 | |
773 | #define SAVE_FLAGS ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \ |
774 | ALT_NOT(X86_FEATURE_XENPV) |
775 | #endif |
776 | #endif /* CONFIG_PARAVIRT_XXL */ |
777 | #endif /* CONFIG_X86_64 */ |
778 | |
779 | #endif /* __ASSEMBLY__ */ |
780 | #else /* CONFIG_PARAVIRT */ |
781 | # define default_banner x86_init_noop |
782 | |
783 | #ifndef __ASSEMBLY__ |
784 | static inline void native_pv_lock_init(void) |
785 | { |
786 | } |
787 | #endif |
788 | #endif /* !CONFIG_PARAVIRT */ |
789 | |
790 | #ifndef __ASSEMBLY__ |
791 | #ifndef CONFIG_PARAVIRT_XXL |
792 | static inline void paravirt_enter_mmap(struct mm_struct *mm) |
793 | { |
794 | } |
795 | #endif |
796 | |
797 | #ifndef CONFIG_PARAVIRT |
798 | static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) |
799 | { |
800 | } |
801 | #endif |
802 | |
803 | #ifndef CONFIG_PARAVIRT_SPINLOCKS |
804 | static inline void paravirt_set_cap(void) |
805 | { |
806 | } |
807 | #endif |
808 | #endif /* __ASSEMBLY__ */ |
809 | #endif /* _ASM_X86_PARAVIRT_H */ |
810 | |