/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked with respect to the lockref code, so that the lockref code
 *      cannot steal the lock and change things underneath it. This also
 *      allows some optimizations to be applied without conflicting with
 *      lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
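
/*
 * A minimal usage sketch (hypothetical caller, modelled on how the lockref
 * code uses arch_spin_value_unlocked() on a copied lock word): because the
 * lock is passed by value, a snapshot can be tested without touching the
 * lock's cacheline again.
 *
 *	struct qspinlock snap = *lockp;
 *
 *	if (queued_spin_value_unlocked(snap))
 *		... lockless fast path ...
 */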

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

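	/*
	 * Quick non-atomic check first: any nonzero value (locked, pending
	 * bit set, or waiters queued) means the trylock cannot succeed, so
	 * bail out without issuing an atomic RMW on a contended cacheline.
	 */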
	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

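	/*
	 * Fast path: a lock word of 0 means unlocked with no pending bit and
	 * no queued waiters, so try to claim it with a single acquire
	 * cmpxchg.  On failure, @val is updated to the value actually
	 * observed and handed to the slowpath.
	 */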
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics: the memory accesses of the
	 * critical section must not be reordered past the clearing of the
	 * locked byte.
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

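/*
 * An architecture can provide its own queued_spin_unlock() and #define it
 * before including this header, which is what the #ifndef above checks.
 * A minimal sketch of such an override (hypothetical arch header, for
 * illustration only, assuming a plain release store of the locked byte is
 * sufficient on that architecture):
 *
 *	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		smp_store_release(&lock->locked, 0);
 *	}
 *	#define queued_spin_unlock queued_spin_unlock
 */
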
#ifndef virt_spin_lock
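/*
 * Hook for virtualized guests.  The default below returns false, so the
 * slowpath proceeds with native queueing.  An architecture may override
 * this (for example to spin with a simple test-and-set while running as a
 * guest) and return true once it has taken the lock.
 */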
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remap architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
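
/*
 * A minimal usage sketch through the arch_spin_* wrappers above
 * (hypothetical caller; real users normally go through the higher-level
 * spinlock API, and __ARCH_SPIN_LOCK_UNLOCKED is assumed to be provided
 * by the qspinlock_types.h header included above):
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 */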

#endif /* __ASM_GENERIC_QSPINLOCK_H */