// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

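/*
 * Wrapper around native_queued_spin_unlock() so that
 * PV_CALLEE_SAVE_REGS_THUNK() below can generate a callee-save thunk
 * for it.
 */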
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

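/*
 * True if the queued_spin_unlock pv_op still points at the native
 * callee-save thunk, i.e. it has not been replaced by a paravirtualized
 * implementation.
 */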
bool pv_is_native_spin_unlock(void)
{
	return pv_ops.lock.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

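/* On bare metal a vCPU is never preempted by a hypervisor. */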
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

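/*
 * True if the vcpu_is_preempted pv_op still points at the native
 * callee-save thunk above.
 */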
bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_ops.lock.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}

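/*
 * Force-set the synthetic CPU feature bits for any lock pv_op that has
 * been replaced by a non-native implementation.
 */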
void __init paravirt_set_cap(void)
{
	if (!pv_is_native_spin_unlock())
		setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);

	if (!pv_is_native_vcpu_is_preempted())
		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
}