1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _ASM_X86_PARAVIRT_TYPES_H |
3 | #define _ASM_X86_PARAVIRT_TYPES_H |
4 | |
5 | /* Bitmask of what can be clobbered: usually at least eax. */ |
6 | #define CLBR_NONE 0 |
7 | #define CLBR_EAX (1 << 0) |
8 | #define CLBR_ECX (1 << 1) |
9 | #define CLBR_EDX (1 << 2) |
10 | #define CLBR_EDI (1 << 3) |
11 | |
12 | #ifdef CONFIG_X86_32 |
/* CLBR_ANY should match all regs the platform has. For i386, that's just the four above */
14 | #define CLBR_ANY ((1 << 4) - 1) |
15 | |
16 | #define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) |
17 | #define CLBR_RET_REG (CLBR_EAX | CLBR_EDX) |
18 | #define CLBR_SCRATCH (0) |
19 | #else |
20 | #define CLBR_RAX CLBR_EAX |
21 | #define CLBR_RCX CLBR_ECX |
22 | #define CLBR_RDX CLBR_EDX |
23 | #define CLBR_RDI CLBR_EDI |
24 | #define CLBR_RSI (1 << 4) |
25 | #define CLBR_R8 (1 << 5) |
26 | #define CLBR_R9 (1 << 6) |
27 | #define CLBR_R10 (1 << 7) |
28 | #define CLBR_R11 (1 << 8) |
29 | |
30 | #define CLBR_ANY ((1 << 9) - 1) |
31 | |
32 | #define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \ |
33 | CLBR_RCX | CLBR_R8 | CLBR_R9) |
34 | #define CLBR_RET_REG (CLBR_RAX) |
35 | #define CLBR_SCRATCH (CLBR_R10 | CLBR_R11) |
36 | |
37 | #endif /* X86_64 */ |
38 | |
39 | #define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG) |
40 | |
41 | #ifndef __ASSEMBLY__ |
42 | |
43 | #include <asm/desc_defs.h> |
44 | #include <asm/kmap_types.h> |
45 | #include <asm/pgtable_types.h> |
46 | #include <asm/nospec-branch.h> |
47 | |
48 | struct page; |
49 | struct thread_struct; |
50 | struct desc_ptr; |
51 | struct tss_struct; |
52 | struct mm_struct; |
53 | struct desc_struct; |
54 | struct task_struct; |
55 | struct cpumask; |
56 | struct flush_tlb_info; |
57 | struct mmu_gather; |
58 | struct vm_area_struct; |
59 | |
60 | /* |
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in <asm/paravirt.h>
 * and the sketch below.
63 | */ |
64 | struct paravirt_callee_save { |
65 | void *func; |
66 | }; |
67 | |
68 | /* general info */ |
69 | struct pv_info { |
70 | #ifdef CONFIG_PARAVIRT_XXL |
71 | unsigned int kernel_rpl; |
72 | int shared_kernel_pmd; |
73 | |
74 | #ifdef CONFIG_X86_64 |
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
76 | #endif |
77 | #endif |
78 | |
79 | const char *name; |
80 | }; |
81 | |
82 | struct pv_init_ops { |
83 | /* |
84 | * Patch may replace one of the defined code sequences with |
85 | * arbitrary code, subject to the same register constraints. |
86 | * This generally means the code is not free to clobber any |
87 | * registers other than EAX. The patch function should return |
88 | * the number of bytes of code generated, as we nop pad the |
89 | * rest in generic code. |
90 | */ |
91 | unsigned (*patch)(u8 type, void *insnbuf, |
92 | unsigned long addr, unsigned len); |
93 | } __no_randomize_layout; |
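
/*
 * A minimal sketch of a patch hook honouring that contract (hypothetical
 * backend; real implementations also inline selected sequences, see
 * DEF_NATIVE below):
 *
 *	unsigned my_patch(u8 type, void *insnbuf, unsigned long addr,
 *			  unsigned len)
 *	{
 *		return paravirt_patch_default(type, insnbuf, addr, len);
 *	}
 *
 * Whatever length is returned, the generic code (apply_paravirt())
 * nop-pads the remaining bytes of the patch site.
 */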
94 | |
95 | #ifdef CONFIG_PARAVIRT_XXL |
96 | struct pv_lazy_ops { |
97 | /* Set deferred update mode, used for batching operations. */ |
98 | void (*enter)(void); |
99 | void (*leave)(void); |
100 | void (*flush)(void); |
101 | } __no_randomize_layout; |
102 | #endif |
103 | |
104 | struct pv_time_ops { |
105 | unsigned long long (*sched_clock)(void); |
106 | unsigned long long (*steal_clock)(int cpu); |
107 | } __no_randomize_layout; |
108 | |
109 | struct pv_cpu_ops { |
110 | /* hooks for various privileged instructions */ |
111 | void (*io_delay)(void); |
112 | |
113 | #ifdef CONFIG_PARAVIRT_XXL |
114 | unsigned long (*get_debugreg)(int regno); |
115 | void (*set_debugreg)(int regno, unsigned long value); |
116 | |
117 | unsigned long (*read_cr0)(void); |
118 | void (*write_cr0)(unsigned long); |
119 | |
120 | void (*write_cr4)(unsigned long); |
121 | |
122 | #ifdef CONFIG_X86_64 |
123 | unsigned long (*read_cr8)(void); |
124 | void (*write_cr8)(unsigned long); |
125 | #endif |
126 | |
127 | /* Segment descriptor handling */ |
128 | void (*load_tr_desc)(void); |
129 | void (*load_gdt)(const struct desc_ptr *); |
130 | void (*load_idt)(const struct desc_ptr *); |
131 | void (*set_ldt)(const void *desc, unsigned entries); |
132 | unsigned long (*store_tr)(void); |
133 | void (*load_tls)(struct thread_struct *t, unsigned int cpu); |
134 | #ifdef CONFIG_X86_64 |
135 | void (*load_gs_index)(unsigned int idx); |
136 | #endif |
137 | void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, |
138 | const void *desc); |
139 | void (*write_gdt_entry)(struct desc_struct *, |
140 | int entrynum, const void *desc, int size); |
141 | void (*write_idt_entry)(gate_desc *, |
142 | int entrynum, const gate_desc *gate); |
143 | void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); |
144 | void (*free_ldt)(struct desc_struct *ldt, unsigned entries); |
145 | |
146 | void (*load_sp0)(unsigned long sp0); |
147 | |
148 | void (*set_iopl_mask)(unsigned mask); |
149 | |
150 | void (*wbinvd)(void); |
151 | |
152 | /* cpuid emulation, mostly so that caps bits can be disabled */ |
153 | void (*cpuid)(unsigned int *eax, unsigned int *ebx, |
154 | unsigned int *ecx, unsigned int *edx); |
155 | |
156 | /* Unsafe MSR operations. These will warn or panic on failure. */ |
157 | u64 (*read_msr)(unsigned int msr); |
158 | void (*write_msr)(unsigned int msr, unsigned low, unsigned high); |
159 | |
160 | /* |
161 | * Safe MSR operations. |
162 | * read sets err to 0 or -EIO. write returns 0 or -EIO. |
163 | */ |
164 | u64 (*read_msr_safe)(unsigned int msr, int *err); |
165 | int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high); |
166 | |
167 | u64 (*read_pmc)(int counter); |
168 | |
169 | /* |
170 | * Switch to usermode gs and return to 64-bit usermode using |
171 | * sysret. Only used in 64-bit kernels to return to 64-bit |
172 | * processes. Usermode register state, including %rsp, must |
173 | * already be restored. |
174 | */ |
175 | void (*usergs_sysret64)(void); |
176 | |
177 | /* Normal iret. Jump to this with the standard iret stack |
178 | frame set up. */ |
179 | void (*iret)(void); |
180 | |
181 | void (*swapgs)(void); |
182 | |
183 | void (*start_context_switch)(struct task_struct *prev); |
184 | void (*end_context_switch)(struct task_struct *next); |
185 | #endif |
186 | } __no_randomize_layout; |
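
/*
 * Error-handling sketch for the read_msr_safe()/write_msr_safe() hooks
 * above (illustrative only; callers normally go through the
 * rdmsr*()/wrmsr*() helpers rather than touching pv_ops directly):
 *
 *	int err;
 *	u64 val;
 *
 *	val = pv_ops.cpu.read_msr_safe(msr, &err);
 *	if (err)
 *		return err;
 *
 *	return pv_ops.cpu.write_msr_safe(msr, low, high);
 *
 * The non-_safe variants skip the error plumbing and warn or panic if
 * the access faults.
 */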
187 | |
188 | struct pv_irq_ops { |
189 | #ifdef CONFIG_PARAVIRT_XXL |
190 | /* |
191 | * Get/set interrupt state. save_fl and restore_fl are only |
192 | * expected to use X86_EFLAGS_IF; all other bits |
193 | * returned from save_fl are undefined, and may be ignored by |
194 | * restore_fl. |
195 | * |
	 * NOTE: Callers expect these functions to preserve more
	 * registers than the standard C calling convention; see the
	 * wrapper sketch after this struct.
198 | */ |
199 | struct paravirt_callee_save save_fl; |
200 | struct paravirt_callee_save restore_fl; |
201 | struct paravirt_callee_save irq_disable; |
202 | struct paravirt_callee_save irq_enable; |
203 | |
204 | void (*safe_halt)(void); |
205 | void (*halt)(void); |
206 | #endif |
207 | } __no_randomize_layout; |
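
/*
 * For example, <asm/paravirt.h> wraps save_fl roughly like this
 * (simplified sketch).  PVOP_CALLEE0() tells the compiler that only the
 * return register is clobbered, which is what the extra
 * register-preservation requirement above buys us:
 *
 *	static inline notrace unsigned long arch_local_save_flags(void)
 *	{
 *		return PVOP_CALLEE0(unsigned long, irq.save_fl);
 *	}
 */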
208 | |
209 | struct pv_mmu_ops { |
210 | /* TLB operations */ |
211 | void (*flush_tlb_user)(void); |
212 | void (*flush_tlb_kernel)(void); |
213 | void (*flush_tlb_one_user)(unsigned long addr); |
214 | void (*flush_tlb_others)(const struct cpumask *cpus, |
215 | const struct flush_tlb_info *info); |
216 | |
217 | void (*tlb_remove_table)(struct mmu_gather *tlb, void *table); |
218 | |
219 | /* Hook for intercepting the destruction of an mm_struct. */ |
220 | void (*exit_mmap)(struct mm_struct *mm); |
221 | |
222 | #ifdef CONFIG_PARAVIRT_XXL |
223 | unsigned long (*read_cr2)(void); |
224 | void (*write_cr2)(unsigned long); |
225 | |
226 | unsigned long (*read_cr3)(void); |
227 | void (*write_cr3)(unsigned long); |
228 | |
229 | /* Hooks for intercepting the creation/use of an mm_struct. */ |
230 | void (*activate_mm)(struct mm_struct *prev, |
231 | struct mm_struct *next); |
232 | void (*dup_mmap)(struct mm_struct *oldmm, |
233 | struct mm_struct *mm); |
234 | |
235 | /* Hooks for allocating and freeing a pagetable top-level */ |
236 | int (*pgd_alloc)(struct mm_struct *mm); |
237 | void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd); |
238 | |
239 | /* |
240 | * Hooks for allocating/releasing pagetable pages when they're |
241 | * attached to a pagetable |
242 | */ |
243 | void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); |
244 | void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); |
245 | void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); |
246 | void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn); |
247 | void (*release_pte)(unsigned long pfn); |
248 | void (*release_pmd)(unsigned long pfn); |
249 | void (*release_pud)(unsigned long pfn); |
250 | void (*release_p4d)(unsigned long pfn); |
251 | |
252 | /* Pagetable manipulation functions */ |
253 | void (*set_pte)(pte_t *ptep, pte_t pteval); |
254 | void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, |
255 | pte_t *ptep, pte_t pteval); |
256 | void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); |
257 | |
258 | pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr, |
259 | pte_t *ptep); |
260 | void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr, |
261 | pte_t *ptep, pte_t pte); |
262 | |
263 | struct paravirt_callee_save pte_val; |
264 | struct paravirt_callee_save make_pte; |
265 | |
266 | struct paravirt_callee_save pgd_val; |
267 | struct paravirt_callee_save make_pgd; |
268 | |
269 | #if CONFIG_PGTABLE_LEVELS >= 3 |
270 | #ifdef CONFIG_X86_PAE |
271 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); |
272 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, |
273 | pte_t *ptep); |
274 | void (*pmd_clear)(pmd_t *pmdp); |
275 | |
276 | #endif /* CONFIG_X86_PAE */ |
277 | |
278 | void (*set_pud)(pud_t *pudp, pud_t pudval); |
279 | |
280 | struct paravirt_callee_save pmd_val; |
281 | struct paravirt_callee_save make_pmd; |
282 | |
283 | #if CONFIG_PGTABLE_LEVELS >= 4 |
284 | struct paravirt_callee_save pud_val; |
285 | struct paravirt_callee_save make_pud; |
286 | |
287 | void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval); |
288 | |
289 | #if CONFIG_PGTABLE_LEVELS >= 5 |
290 | struct paravirt_callee_save p4d_val; |
291 | struct paravirt_callee_save make_p4d; |
292 | |
293 | void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval); |
294 | #endif /* CONFIG_PGTABLE_LEVELS >= 5 */ |
295 | |
296 | #endif /* CONFIG_PGTABLE_LEVELS >= 4 */ |
297 | |
298 | #endif /* CONFIG_PGTABLE_LEVELS >= 3 */ |
299 | |
300 | struct pv_lazy_ops lazy_mode; |
301 | |
302 | /* dom0 ops */ |
303 | |
	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn. We can tell which is which from the index. */
306 | void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, |
307 | phys_addr_t phys, pgprot_t flags); |
308 | #endif |
309 | } __no_randomize_layout; |
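
/*
 * As an example of how the callee-save pte helpers above are consumed,
 * <asm/paravirt.h> builds __pte() on top of make_pte roughly like this
 * (64-bit case, simplified; 32-bit PAE splits the value across two
 * arguments):
 *
 *	static inline pte_t __pte(pteval_t val)
 *	{
 *		return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
 *	}
 */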
310 | |
311 | struct arch_spinlock; |
312 | #ifdef CONFIG_SMP |
313 | #include <asm/spinlock_types.h> |
314 | #endif |
315 | |
316 | struct qspinlock; |
317 | |
318 | struct pv_lock_ops { |
319 | void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val); |
320 | struct paravirt_callee_save queued_spin_unlock; |
321 | |
322 | void (*wait)(u8 *ptr, u8 val); |
323 | void (*kick)(int cpu); |
324 | |
325 | struct paravirt_callee_save vcpu_is_preempted; |
326 | } __no_randomize_layout; |
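
/*
 * Illustrative wiring of these hooks by a hypervisor backend
 * (hypothetical my_* names; the slowpath itself comes from the generic
 * paravirt qspinlock code):
 *
 *	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(my_queued_spin_unlock);
 *
 * wait(ptr, val) halts if *ptr still equals val; kick(cpu) wakes the
 * vCPU blocked in wait():
 *
 *	pv_ops.lock.wait = my_wait;
 *	pv_ops.lock.kick = my_kick;
 *	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(my_vcpu_is_preempted);
 */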
327 | |
328 | /* This contains all the paravirt structures: we get a convenient |
329 | * number for each function using the offset which we use to indicate |
330 | * what to patch. */ |
331 | struct paravirt_patch_template { |
332 | struct pv_init_ops init; |
333 | struct pv_time_ops time; |
334 | struct pv_cpu_ops cpu; |
335 | struct pv_irq_ops irq; |
336 | struct pv_mmu_ops mmu; |
337 | struct pv_lock_ops lock; |
338 | } __no_randomize_layout; |
339 | |
340 | extern struct pv_info pv_info; |
341 | extern struct paravirt_patch_template pv_ops; |
342 | |
343 | #define PARAVIRT_PATCH(x) \ |
344 | (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) |
345 | |
346 | #define paravirt_type(op) \ |
347 | [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ |
348 | [paravirt_opptr] "i" (&(pv_ops.op)) |
349 | #define paravirt_clobber(clobber) \ |
350 | [paravirt_clobber] "i" (clobber) |
351 | |
352 | /* |
353 | * Generate some code, and mark it as patchable by the |
354 | * apply_paravirt() alternate instruction patcher. |
355 | */ |
356 | #define _paravirt_alt(insn_string, type, clobber) \ |
357 | "771:\n\t" insn_string "\n" "772:\n" \ |
358 | ".pushsection .parainstructions,\"a\"\n" \ |
359 | _ASM_ALIGN "\n" \ |
360 | _ASM_PTR " 771b\n" \ |
361 | " .byte " type "\n" \ |
362 | " .byte 772b-771b\n" \ |
363 | " .short " clobber "\n" \ |
364 | ".popsection\n" |
365 | |
366 | /* Generate patchable code, with the default asm parameters. */ |
367 | #define paravirt_alt(insn_string) \ |
368 | _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") |
369 | |
370 | /* Simple instruction patching code. */ |
371 | #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t" |
372 | |
373 | #define DEF_NATIVE(ops, name, code) \ |
374 | __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \ |
375 | asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name)) |
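
/*
 * Example (modelled on the native patching code, slightly abridged):
 * define the raw instruction sequence once, then splice it into a patch
 * site from the patch hook:
 *
 *	DEF_NATIVE(irq, irq_disable, "cli");
 *
 *	case PARAVIRT_PATCH(irq.irq_disable):
 *		return paravirt_patch_insns(ibuf, len,
 *					    start_irq_irq_disable,
 *					    end_irq_irq_disable);
 */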
376 | |
377 | unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); |
378 | unsigned paravirt_patch_default(u8 type, void *insnbuf, |
379 | unsigned long addr, unsigned len); |
380 | |
381 | unsigned paravirt_patch_insns(void *insnbuf, unsigned len, |
382 | const char *start, const char *end); |
383 | |
384 | unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len); |
385 | |
386 | int paravirt_disable_iospace(void); |
387 | |
388 | /* |
389 | * This generates an indirect call based on the operation type number. |
390 | * The type number, computed in PARAVIRT_PATCH, is derived from the |
391 | * offset into the paravirt_patch_template structure, and can therefore be |
392 | * freely converted back into a structure offset. |
393 | */ |
394 | #define PARAVIRT_CALL \ |
395 | ANNOTATE_RETPOLINE_SAFE \ |
396 | "call *%c[paravirt_opptr];" |
397 | |
398 | /* |
399 | * These macros are intended to wrap calls through one of the paravirt |
400 | * ops structs, so that they can be later identified and patched at |
401 | * runtime. |
402 | * |
403 | * Normally, a call to a pv_op function is a simple indirect call: |
404 | * (pv_op_struct.operations)(args...). |
405 | * |
406 | * Unfortunately, this is a relatively slow operation for modern CPUs, |
407 | * because it cannot necessarily determine what the destination |
408 | * address is. In this case, the address is a runtime constant, so at |
 * the very least we can patch the call to be a simple direct call, or
410 | * ideally, patch an inline implementation into the callsite. (Direct |
411 | * calls are essentially free, because the call and return addresses |
412 | * are completely predictable.) |
413 | * |
414 | * For i386, these macros rely on the standard gcc "regparm(3)" calling |
415 | * convention, in which the first three arguments are placed in %eax, |
416 | * %edx, %ecx (in that order), and the remaining arguments are placed |
417 | * on the stack. All caller-save registers (eax,edx,ecx) are expected |
418 | * to be modified (either clobbered or used for return values). |
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
425 | * |
426 | * The call instruction itself is marked by placing its start address |
427 | * and size into the .parainstructions section, so that |
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
429 | * appropriate patching under the control of the backend pv_init_ops |
430 | * implementation. |
431 | * |
432 | * Unfortunately there's no way to get gcc to generate the args setup |
433 | * for the call, and then allow the call itself to be generated by an |
434 | * inline asm. Because of this, we must do the complete arg setup and |
435 | * return value handling from within these macros. This is fairly |
436 | * cumbersome. |
437 | * |
438 | * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments. |
439 | * It could be extended to more arguments, but there would be little |
440 | * to be gained from that. For each number of arguments, there are |
441 | * the two VCALL and CALL variants for void and non-void functions. |
442 | * |
443 | * When there is a return value, the invoker of the macro must specify |
444 | * the return type. The macro then uses sizeof() on that type to |
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
448 | * the return value size. |
449 | * |
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
453 | * |
454 | * Small structures are passed and returned in registers. The macro |
455 | * calling convention can't directly deal with this, so the wrapper |
456 | * functions must do this. |
457 | * |
458 | * These PVOP_* macros are only defined within this header. This |
459 | * means that all uses must be wrapped in inline functions. This also |
460 | * makes sure the incoming and outgoing types are always correct. |
461 | */ |
462 | #ifdef CONFIG_X86_32 |
463 | #define PVOP_VCALL_ARGS \ |
464 | unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; |
465 | |
466 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS |
467 | |
468 | #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) |
469 | #define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x)) |
470 | #define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x)) |
471 | |
472 | #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ |
473 | "=c" (__ecx) |
474 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS |
475 | |
476 | #define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx) |
477 | #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS |
478 | |
479 | #define EXTRA_CLOBBERS |
480 | #define VEXTRA_CLOBBERS |
481 | #else /* CONFIG_X86_64 */ |
482 | /* [re]ax isn't an arg, but the return val */ |
483 | #define PVOP_VCALL_ARGS \ |
484 | unsigned long __edi = __edi, __esi = __esi, \ |
485 | __edx = __edx, __ecx = __ecx, __eax = __eax; |
486 | |
487 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS |
488 | |
489 | #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) |
490 | #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) |
491 | #define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x)) |
492 | #define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x)) |
493 | |
494 | #define PVOP_VCALL_CLOBBERS "=D" (__edi), \ |
495 | "=S" (__esi), "=d" (__edx), \ |
496 | "=c" (__ecx) |
497 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) |
498 | |
499 | /* void functions are still allowed [re]ax for scratch */ |
500 | #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) |
501 | #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS |
502 | |
#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
505 | #endif /* CONFIG_X86_32 */ |
506 | |
507 | #ifdef CONFIG_PARAVIRT_DEBUG |
508 | #define PVOP_TEST_NULL(op) BUG_ON(pv_ops.op == NULL) |
509 | #else |
510 | #define PVOP_TEST_NULL(op) ((void)pv_ops.op) |
511 | #endif |
512 | |
513 | #define PVOP_RETMASK(rettype) \ |
514 | ({ unsigned long __mask = ~0UL; \ |
515 | switch (sizeof(rettype)) { \ |
516 | case 1: __mask = 0xffUL; break; \ |
517 | case 2: __mask = 0xffffUL; break; \ |
518 | case 4: __mask = 0xffffffffUL; break; \ |
519 | default: break; \ |
520 | } \ |
521 | __mask; \ |
522 | }) |
523 | |
524 | |
525 | #define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \ |
526 | pre, post, ...) \ |
527 | ({ \ |
528 | rettype __ret; \ |
529 | PVOP_CALL_ARGS; \ |
530 | PVOP_TEST_NULL(op); \ |
531 | /* This is 32-bit specific, but is okay in 64-bit */ \ |
532 | /* since this condition will never hold */ \ |
533 | if (sizeof(rettype) > sizeof(unsigned long)) { \ |
534 | asm volatile(pre \ |
535 | paravirt_alt(PARAVIRT_CALL) \ |
536 | post \ |
537 | : call_clbr, ASM_CALL_CONSTRAINT \ |
538 | : paravirt_type(op), \ |
539 | paravirt_clobber(clbr), \ |
540 | ##__VA_ARGS__ \ |
541 | : "memory", "cc" extra_clbr); \ |
542 | __ret = (rettype)((((u64)__edx) << 32) | __eax); \ |
543 | } else { \ |
544 | asm volatile(pre \ |
545 | paravirt_alt(PARAVIRT_CALL) \ |
546 | post \ |
547 | : call_clbr, ASM_CALL_CONSTRAINT \ |
548 | : paravirt_type(op), \ |
549 | paravirt_clobber(clbr), \ |
550 | ##__VA_ARGS__ \ |
551 | : "memory", "cc" extra_clbr); \ |
552 | __ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \ |
553 | } \ |
554 | __ret; \ |
555 | }) |
556 | |
557 | #define __PVOP_CALL(rettype, op, pre, post, ...) \ |
558 | ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \ |
559 | EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__) |
560 | |
561 | #define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \ |
562 | ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ |
563 | PVOP_CALLEE_CLOBBERS, , \ |
564 | pre, post, ##__VA_ARGS__) |
565 | |
566 | |
567 | #define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \ |
568 | ({ \ |
569 | PVOP_VCALL_ARGS; \ |
570 | PVOP_TEST_NULL(op); \ |
571 | asm volatile(pre \ |
572 | paravirt_alt(PARAVIRT_CALL) \ |
573 | post \ |
574 | : call_clbr, ASM_CALL_CONSTRAINT \ |
575 | : paravirt_type(op), \ |
576 | paravirt_clobber(clbr), \ |
577 | ##__VA_ARGS__ \ |
578 | : "memory", "cc" extra_clbr); \ |
579 | }) |
580 | |
581 | #define __PVOP_VCALL(op, pre, post, ...) \ |
582 | ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \ |
583 | VEXTRA_CLOBBERS, \ |
584 | pre, post, ##__VA_ARGS__) |
585 | |
586 | #define __PVOP_VCALLEESAVE(op, pre, post, ...) \ |
587 | ____PVOP_VCALL(op.func, CLBR_RET_REG, \ |
588 | PVOP_VCALLEE_CLOBBERS, , \ |
589 | pre, post, ##__VA_ARGS__) |
590 | |
591 | |
592 | |
593 | #define PVOP_CALL0(rettype, op) \ |
594 | __PVOP_CALL(rettype, op, "", "") |
595 | #define PVOP_VCALL0(op) \ |
596 | __PVOP_VCALL(op, "", "") |
597 | |
598 | #define PVOP_CALLEE0(rettype, op) \ |
599 | __PVOP_CALLEESAVE(rettype, op, "", "") |
600 | #define PVOP_VCALLEE0(op) \ |
601 | __PVOP_VCALLEESAVE(op, "", "") |
602 | |
603 | |
604 | #define PVOP_CALL1(rettype, op, arg1) \ |
605 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) |
606 | #define PVOP_VCALL1(op, arg1) \ |
607 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1)) |
608 | |
609 | #define PVOP_CALLEE1(rettype, op, arg1) \ |
610 | __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) |
611 | #define PVOP_VCALLEE1(op, arg1) \ |
612 | __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1)) |
613 | |
614 | |
615 | #define PVOP_CALL2(rettype, op, arg1, arg2) \ |
616 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ |
617 | PVOP_CALL_ARG2(arg2)) |
618 | #define PVOP_VCALL2(op, arg1, arg2) \ |
619 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ |
620 | PVOP_CALL_ARG2(arg2)) |
621 | |
622 | #define PVOP_CALLEE2(rettype, op, arg1, arg2) \ |
623 | __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ |
624 | PVOP_CALL_ARG2(arg2)) |
625 | #define PVOP_VCALLEE2(op, arg1, arg2) \ |
626 | __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \ |
627 | PVOP_CALL_ARG2(arg2)) |
628 | |
629 | |
630 | #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ |
631 | __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ |
632 | PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) |
633 | #define PVOP_VCALL3(op, arg1, arg2, arg3) \ |
634 | __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ |
635 | PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) |
636 | |
637 | /* This is the only difference in x86_64. We can make it much simpler */ |
638 | #ifdef CONFIG_X86_32 |
639 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ |
640 | __PVOP_CALL(rettype, op, \ |
641 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ |
642 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ |
643 | PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4))) |
644 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ |
645 | __PVOP_VCALL(op, \ |
646 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ |
647 | "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ |
648 | "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) |
649 | #else |
650 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ |
651 | __PVOP_CALL(rettype, op, "", "", \ |
652 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ |
653 | PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) |
654 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ |
655 | __PVOP_VCALL(op, "", "", \ |
656 | PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ |
657 | PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) |
658 | #endif |
659 | |
660 | /* Lazy mode for batching updates / context switch */ |
661 | enum paravirt_lazy_mode { |
662 | PARAVIRT_LAZY_NONE, |
663 | PARAVIRT_LAZY_MMU, |
664 | PARAVIRT_LAZY_CPU, |
665 | }; |
666 | |
667 | enum paravirt_lazy_mode paravirt_get_lazy_mode(void); |
668 | void paravirt_start_context_switch(struct task_struct *prev); |
669 | void paravirt_end_context_switch(struct task_struct *next); |
670 | |
671 | void paravirt_enter_lazy_mmu(void); |
672 | void paravirt_leave_lazy_mmu(void); |
673 | void paravirt_flush_lazy_mmu(void); |
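
/*
 * Typical batching pattern, as seen through the arch_*_lazy_mmu_mode()
 * wrappers in <asm/paravirt.h> (illustrative only):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 *
 * A backend may queue the individual updates while lazy mode is active
 * and submit them as one batch on leave (or on an explicit flush).
 */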
674 | |
675 | void _paravirt_nop(void); |
676 | u64 _paravirt_ident_64(u64); |
677 | |
678 | #define paravirt_nop ((void *)_paravirt_nop) |
679 | |
680 | /* These all sit in the .parainstructions section to tell us what to patch. */ |
681 | struct paravirt_patch_site { |
682 | u8 *instr; /* original instructions */ |
683 | u8 instrtype; /* type of this instruction */ |
684 | u8 len; /* length of original instruction */ |
685 | }; |
686 | |
687 | extern struct paravirt_patch_site __parainstructions[], |
688 | __parainstructions_end[]; |
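
/*
 * Simplified sketch of how these records are consumed by
 * apply_paravirt() in arch/x86/kernel/alternative.c:
 *
 *	struct paravirt_patch_site *p;
 *	char insnbuf[MAX_PATCH_LEN];
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used;
 *
 *		memcpy(insnbuf, p->instr, p->len);
 *		used = pv_ops.init.patch(p->instrtype, insnbuf,
 *					 (unsigned long)p->instr, p->len);
 *		add_nops(insnbuf + used, p->len - used);
 *		text_poke_early(p->instr, insnbuf, p->len);
 *	}
 */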
689 | |
690 | #endif /* __ASSEMBLY__ */ |
691 | |
692 | #endif /* _ASM_X86_PARAVIRT_TYPES_H */ |
693 | |