/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

#ifndef __ASSEMBLY__
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;	/* original instructions */
	u8 type;	/* type of this instruction */
	u8 len;		/* length of original instruction */
};
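
/*
 * A minimal sketch of how a patcher walks these records; the real work
 * is done by apply_paravirt() in arch/x86/kernel/alternative.c:
 *
 *	struct paravirt_patch_site *p;
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++)
 *		paravirt_patch(p->type, p->instr,
 *			       (unsigned long)p->instr, p->len);
 */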
#endif

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in paravirt.h.
 */
struct paravirt_callee_save {
	void *func;
};
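
/*
 * For example (a sketch; the helper macros live in paravirt.h, and
 * my_save_fl is hypothetical), a backend publishes a callee-save hook
 * by wrapping the thunk generated by PV_CALLEE_SAVE_REGS_THUNK:
 *
 *	__visible unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *	...
 *	pv_ops.irq.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */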

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
};

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
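
/*
 * Typical use (a sketch): a caller enters lazy mode, queues a batch of
 * page-table updates, and the backend flushes them on leave:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (...)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 *
 * The arch_*_lazy_mmu_mode() helpers route through these hooks.
 */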
#endif

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	void (*wbinvd)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;
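
/*
 * A sketch of how the native backend fills these slots in (the real
 * table is the pv_ops initializer in arch/x86/kernel/paravirt.c):
 *
 *	struct paravirt_patch_template pv_ops = {
 *		.cpu.io_delay	= native_io_delay,
 *		...
 *	};
 *
 * Hypervisor guests (Xen PV, KVM, ...) then override individual slots
 * at boot.
 */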

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl is expected to use X86_EFLAGS_IF;
	 * all other bits returned from save_fl are undefined.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
#endif
} __no_randomize_layout;
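
/*
 * The irqflags wrappers in paravirt.h dispatch through these slots
 * using the callee-save convention; roughly (a sketch):
 *
 *	static __always_inline unsigned long arch_local_save_flags(void)
 *	{
 *		return PVOP_CALLEE0(unsigned long, irq.save_fl);
 *	}
 */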

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_multi)(const struct cpumask *cpus,
				const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);
	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hook for intercepting the creation/use of an mm_struct. */
	void (*enter_mmap)(struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/*
	 * Sometimes the physical address is a pfn, and sometimes it's
	 * an mfn.  We can tell which is which from the index.
	 */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;
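
/*
 * Inline wrappers in paravirt.h route page-table updates through these
 * slots; for example (a sketch):
 *
 *	static inline void set_pte(pte_t *ptep, pte_t pte)
 *	{
 *		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
 *	}
 */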

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;
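
/*
 * For example (a sketch), the KVM guest support overrides the wait/kick
 * slots at boot so a spinning vCPU sleeps in the hypervisor instead of
 * burning cycles:
 *
 *	pv_ops.lock.wait = kvm_wait;
 *	pv_ops.lock.kick = kvm_kick_cpu;
 */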

/*
 * This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch.
 */
struct paravirt_patch_template {
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "m" (pv_ops.op)
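
/*
 * Worked example: PARAVIRT_PATCH(irq.irq_disable) evaluates to
 * offsetof(struct paravirt_patch_template, irq.irq_disable) divided by
 * sizeof(void *), i.e. the slot index of irq_disable within pv_ops.
 * That index is what gets recorded as the 'type' byte of a patch site.
 */
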
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type)			\
	"771:\n\t" insn_string "\n" "772:\n"			\
	".pushsection .parainstructions,\"a\"\n"		\
	_ASM_ALIGN "\n"						\
	_ASM_PTR " 771b\n"					\
	" .byte " type "\n"					\
	" .byte 772b-771b\n"					\
	_ASM_ALIGN "\n"						\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)				\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters passed in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments
 * in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
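
/*
 * Worked example (a sketch): a wrapper such as
 *
 *	static inline void write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(mmu.write_cr3, x);
 *	}
 *
 * binds x to %rdi (%eax on i386) via PVOP_CALL_ARG1, emits the
 * annotated indirect call through pv_ops.mmu.write_cr3, and lists the
 * remaining caller-saved registers as clobbers.
 */
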
#ifdef CONFIG_X86_32
#define PVOP_CALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
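
/*
 * Note: the dummy self-initializations in PVOP_CALL_ARGS (here and in
 * the 64-bit variant below) only silence "maybe uninitialized"
 * warnings; the asm constraints are what actually bind these variables
 * to fixed registers.
 */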

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else	/* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		      __edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),			\
				"=S" (__esi), "=d" (__edx),	\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/*
 * void functions are still allowed [re]ax for scratch.
 *
 * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
 * registers. Make sure we model this with the appropriate clobbers.
 */
#ifdef CONFIG_ZERO_CALL_USED_REGS
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax), PVOP_VCALL_CLOBBERS
#else
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#endif
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETVAL(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask & __eax;						\
	})
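
/*
 * For example, PVOP_RETVAL(u16) evaluates to (0xffffUL & __eax): the
 * callee may leave garbage in the upper bits of [re]ax for a narrow
 * return type, so the result is masked down to sizeof(rettype) bytes.
 */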


#define ____PVOP_CALL(ret, op, call_clbr, extra_clbr, ...)		\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define ____PVOP_ALT_CALL(ret, op, alt, cond, call_clbr,		\
			  extra_clbr, ...)				\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL),	\
					 alt, cond)			\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define __PVOP_CALL(rettype, op, ...)					\
	____PVOP_CALL(PVOP_RETVAL(rettype), op,				\
		      PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...)			\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond,		\
			  PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,		\
			  ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, ...)				\
	____PVOP_CALL(PVOP_RETVAL(rettype), op.func,			\
		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...)		\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond,	\
			  PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)


#define __PVOP_VCALL(op, ...)						\
	(void)____PVOP_CALL(, op, PVOP_VCALL_CLOBBERS,			\
			    VEXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_VCALL(op, alt, cond, ...)				\
	(void)____PVOP_ALT_CALL(, op, alt, cond,			\
				PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,	\
				##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, ...)					\
	(void)____PVOP_CALL(, op.func,					\
			    PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...)			\
	(void)____PVOP_ALT_CALL(, op.func, alt, cond,			\
				PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)


#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op)
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op)
#define PVOP_ALT_CALL0(rettype, op, alt, cond)				\
	__PVOP_ALT_CALL(rettype, op, alt, cond)
#define PVOP_ALT_VCALL0(op, alt, cond)					\
	__PVOP_ALT_VCALL(op, alt, cond)

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op)
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op)
#define PVOP_ALT_CALLEE0(rettype, op, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond)
#define PVOP_ALT_VCALLEE0(op, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond)


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALL1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1),			\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1),				\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
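
/*
 * Typical use (a sketch; the real wrappers live in paravirt.h): each
 * PVOP_* invocation is wrapped in an inline function so that argument
 * and return types are checked at the call site:
 *
 *	static __always_inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
 *	}
 */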

void _paravirt_nop(void);
void paravirt_BUG(void);
unsigned long paravirt_ret0(void);
#ifdef CONFIG_PARAVIRT_XXL
u64 _paravirt_ident_64(u64);
unsigned long pv_native_save_fl(void);
void pv_native_irq_disable(void);
void pv_native_irq_enable(void);
unsigned long pv_native_read_cr2(void);
#endif

#define paravirt_nop	((void *)_paravirt_nop)

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#endif	/* __ASSEMBLY__ */
#endif	/* CONFIG_PARAVIRT */
#endif	/* _ASM_X86_PARAVIRT_TYPES_H */