/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them. In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

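/*
 * With USE_CMPXCHG_LOCKREF the spinlock and the count are overlaid on a
 * single naturally aligned 64-bit word (hence the SPINLOCK_SIZE <= 4
 * check above), so one cmpxchg can update the lock and the count together.
 */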
struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};

extern void lockref_get(struct lockref *);		/* unconditional increment */
extern int lockref_put_return(struct lockref *);	/* new count, or -1 if the lockless path fails */
extern int lockref_get_not_zero(struct lockref *);	/* increment unless count is zero or dead */
extern int lockref_put_not_zero(struct lockref *);	/* decrement unless it would reach zero */
extern int lockref_put_or_lock(struct lockref *);	/* decrement, or return 0 with lock held */

extern void lockref_mark_dead(struct lockref *);	/* caller must hold the spinlock */
extern int lockref_get_not_dead(struct lockref *);	/* increment unless marked dead */

/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
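
/*
 * Illustrative sketch only, not part of the lockref API: the typical
 * "drop a reference, tear down on the final put" pattern built from the
 * primitives above. The function name is hypothetical; the caller is
 * assumed to free the object embedding the lockref when this returns true.
 */
static inline bool lockref_example_put(struct lockref *ref)
{
	/* Fast path: count was greater than 1 and has been decremented. */
	if (lockref_put_or_lock(ref))
		return false;

	/*
	 * Slow path: the count was <= 1, so lockref_put_or_lock() returned
	 * with ref->lock held. Mark the lockref dead so concurrent lockless
	 * gets fail, drop the lock, and let the caller free the containing
	 * object.
	 */
	lockref_mark_dead(ref);
	spin_unlock(&ref->lock);
	return true;
}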

#endif /* __LINUX_LOCKREF_H */