1 | #ifndef __LINUX_RWLOCK_API_SMP_H |
2 | #define __LINUX_RWLOCK_API_SMP_H |
3 | |
4 | #ifndef __LINUX_SPINLOCK_API_SMP_H |
5 | # error "please don't include this file directly" |
6 | #endif |
7 | |
/*
 * include/linux/rwlock_api_smp.h
 *
 * rwlock API declarations on SMP (and debug)
 * (implemented in kernel/locking/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */
17 | |
/*
 * Out-of-line implementations of the _raw_* rwlock entry points.
 * The __acquires()/__releases() annotations are consumed by sparse for
 * lock-context checking; __lockfunc presumably groups these functions in
 * a dedicated text section (see its definition).  Each declaration below
 * may be overridden by an inline variant when the matching
 * CONFIG_INLINE_* option is enabled.
 */
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
/* The _irqsave variants return the saved interrupt state as flags. */
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
/* Trylocks return nonzero on success, 0 if the lock was not taken. */
int __lockfunc _raw_read_trylock(rwlock_t *lock);
int __lockfunc _raw_write_trylock(rwlock_t *lock);
void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
43 | |
/*
 * When a CONFIG_INLINE_<op> option is set, redirect the corresponding
 * _raw_<op>() call site to the static inline __raw_<op>() implementation
 * in this header instead of the out-of-line function declared above.
 * One #ifdef per operation so each can be inlined independently.
 */
#ifdef CONFIG_INLINE_READ_LOCK
#define _raw_read_lock(lock) __raw_read_lock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK
#define _raw_write_lock(lock) __raw_write_lock(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_BH
#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_BH
#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_IRQ
#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_READ_TRYLOCK
#define _raw_read_trylock(lock) __raw_read_trylock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_TRYLOCK
#define _raw_write_trylock(lock) __raw_write_trylock(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK
#define _raw_read_unlock(lock) __raw_read_unlock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK
#define _raw_write_unlock(lock) __raw_write_unlock(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_BH
#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
#define _raw_read_unlock_irqrestore(lock, flags) \
	__raw_read_unlock_irqrestore(lock, flags)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
#define _raw_write_unlock_irqrestore(lock, flags) \
	__raw_write_unlock_irqrestore(lock, flags)
#endif
117 | |
118 | static inline int __raw_read_trylock(rwlock_t *lock) |
119 | { |
120 | preempt_disable(); |
121 | if (do_raw_read_trylock(lock)) { |
122 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); |
123 | return 1; |
124 | } |
125 | preempt_enable(); |
126 | return 0; |
127 | } |
128 | |
129 | static inline int __raw_write_trylock(rwlock_t *lock) |
130 | { |
131 | preempt_disable(); |
132 | if (do_raw_write_trylock(lock)) { |
133 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
134 | return 1; |
135 | } |
136 | preempt_enable(); |
137 | return 0; |
138 | } |
139 | |
140 | /* |
141 | * If lockdep is enabled then we use the non-preemption spin-ops |
142 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are |
143 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): |
144 | */ |
145 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
146 | |
/*
 * Acquire @lock for reading.  Preemption goes off first so the lock is
 * never held by a preemptible context; lockdep is told about the read
 * acquire before the lock is actually taken.  LOCK_CONTENDED tries the
 * fast path and falls back to the spinning slow path, so contention can
 * be accounted when lock statistics are enabled.
 */
static inline void __raw_read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
153 | |
/*
 * Acquire @lock for reading with local interrupts disabled, returning
 * the previous interrupt state for the matching _irqrestore unlock.
 * Interrupts are disabled before preemption so the two nesting levels
 * are released in the reverse order on unlock.
 */
static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
	return flags;
}
164 | |
/*
 * Acquire @lock for reading with local interrupts disabled
 * unconditionally.  No flags are saved: the caller must know interrupts
 * were enabled and use the _irq unlock variant to re-enable them.
 */
static inline void __raw_read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
172 | |
/*
 * Acquire @lock for reading with softirqs disabled.  The
 * SOFTIRQ_LOCK_OFFSET added by __local_bh_disable_ip() also supplies
 * the preempt-count increment, which is why there is no separate
 * preempt_disable() here (contrast with __raw_read_lock()).
 */
static inline void __raw_read_lock_bh(rwlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}
179 | |
/*
 * Acquire @lock for writing with local interrupts disabled, returning
 * the previous interrupt state for the matching _irqrestore unlock.
 * Same ordering as the read variant: irqs off, then preemption off,
 * then lockdep (exclusive acquire), then the lock itself.
 */
static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
	return flags;
}
190 | |
/*
 * Acquire @lock for writing with local interrupts disabled
 * unconditionally (no flags saved; pair with the _irq unlock variant).
 */
static inline void __raw_write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
198 | |
/*
 * Acquire @lock for writing with softirqs disabled.  As in the read
 * variant, SOFTIRQ_LOCK_OFFSET provides the preempt-count increment,
 * so no explicit preempt_disable() is needed.
 */
static inline void __raw_write_lock_bh(rwlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
205 | |
/*
 * Acquire @lock for writing (exclusive).  Preemption off first, then
 * lockdep is notified, then LOCK_CONTENDED takes the lock via the
 * trylock fast path or the spinning slow path.
 */
static inline void __raw_write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
212 | |
/*
 * Like __raw_write_lock(), but tells lockdep the given @subclass so
 * nested acquisition of locks in the same class does not trigger a
 * false deadlock report.
 */
static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
219 | |
220 | #endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ |
221 | |
/*
 * Release a write-held @lock.  The lockdep release is reported before
 * the hardware unlock, and the preempt count taken by the matching
 * lock operation is dropped last.
 */
static inline void __raw_write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, _RET_IP_);
	do_raw_write_unlock(lock);
	preempt_enable();
}
228 | |
/*
 * Release a read-held @lock: lockdep release, drop the reader, then
 * re-enable preemption (reverse of __raw_read_lock()).
 */
static inline void __raw_read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, _RET_IP_);
	do_raw_read_unlock(lock);
	preempt_enable();
}
235 | |
/*
 * Release a read-held @lock and restore the interrupt state saved by
 * the matching _irqsave lock.  Interrupts are restored before
 * preemption is re-enabled, so any resulting preemption runs with the
 * caller's original irq state.
 */
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, _RET_IP_);
	do_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
244 | |
/*
 * Release a read-held @lock and unconditionally re-enable local
 * interrupts (pairs with __raw_read_lock_irq()).
 */
static inline void __raw_read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, _RET_IP_);
	do_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
252 | |
/*
 * Release a read-held @lock and re-enable softirqs.  Dropping
 * SOFTIRQ_LOCK_OFFSET undoes both the bh-disable and the implicit
 * preempt-disable of __raw_read_lock_bh(), and may run pending
 * softirqs.
 */
static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, _RET_IP_);
	do_raw_read_unlock(lock);
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
259 | |
/*
 * Release a write-held @lock and restore the interrupt state saved by
 * the matching _irqsave lock (irqs restored before preempt_enable, as
 * in the read variant).
 */
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
					     unsigned long flags)
{
	rwlock_release(&lock->dep_map, _RET_IP_);
	do_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
268 | |
/*
 * Release a write-held @lock and unconditionally re-enable local
 * interrupts (pairs with __raw_write_lock_irq()).
 */
static inline void __raw_write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, _RET_IP_);
	do_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
276 | |
/*
 * Release a write-held @lock and re-enable softirqs; dropping
 * SOFTIRQ_LOCK_OFFSET also undoes the implicit preempt-disable of
 * __raw_write_lock_bh().
 */
static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, _RET_IP_);
	do_raw_write_unlock(lock);
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
283 | |
284 | #endif /* __LINUX_RWLOCK_API_SMP_H */ |
285 | |