/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

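/*
 * _Q_PENDING_LOOPS bounds how many times the generic slowpath re-reads the
 * lock word while it waits for a transient pending-but-unlocked state to
 * resolve (see kernel/locking/qspinlock.c); x86 raises the default of a
 * single retry to 2^9 iterations.
 */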
#define _Q_PENDING_LOOPS	(1 << 9)

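/*
 * Set the pending bit with a single LOCK BTSL instead of a cmpxchg loop.
 * The old pending bit comes back via the carry flag and is shifted into
 * place; the remaining fields are sampled with a plain read afterwards.
 * LOCK-prefixed instructions are fully ordered on x86, so this provides the
 * required acquire semantics.  Defining the macro to its own name makes the
 * generic code pick up this helper instead of its default implementation.
 */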
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val;

	/*
	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
	 * and CONFIG_PROFILE_ALL_BRANCHES=y result in a label inside a
	 * statement expression, which GCC doesn't like.
	 */
	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}

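/*
 * With CONFIG_PARAVIRT_SPINLOCKS the slowpath and unlock go through paravirt
 * ops, so a hypervisor-aware implementation (KVM, Xen) can halt a waiting
 * vCPU instead of spinning.  On bare metal the ops are patched to the native
 * routines declared below.
 */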
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

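/*
 * These wrappers dispatch through the paravirt ops, which early boot code
 * points at either the native routines above or the __pv_* variants.
 */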
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

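/*
 * vcpu_is_preempted() lets spin-wait loops and the scheduler ask the
 * hypervisor whether a given vCPU is currently preempted, so they can avoid
 * busy-waiting on a CPU that is not actually running.
 */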
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#endif

#ifdef CONFIG_PARAVIRT
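/*
 * virt_spin_lock_key defaults to true; native_pv_lock_init() disables it on
 * bare metal, and hypervisor code that sets up real paravirt spinlocks
 * typically disables it as well, so the test-and-set fallback below is only
 * used by guests without PARAVIRT_SPINLOCKS support.
 */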
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;

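/*
 * virt_spin_lock() lets a guest hijack queued_spin_lock_slowpath():
 * returning true means the lock was taken here, returning false hands
 * control back to the generic queued slowpath.
 */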
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

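/*
 * Pull in the generic queued spinlock implementation; the defines above make
 * it use the x86-specific overrides instead of its defaults.
 */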
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */