/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_LOCAL64_H
#define _ASM_GENERIC_LOCAL64_H

#include <linux/percpu.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which falls back to atomic64_t on
 * 32-bit kernels; that fallback is rather pointless, since the whole point
 * behind local64_t is that some processors can perform atomic adds and
 * subtracts in a manner which is atomic wrt IRQs running on this CPU.
 * local64_t allows exploitation of such capabilities.
 */
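
/*
 * Illustrative sketch (not part of this header; evt_count, record_event
 * and total_events are hypothetical names): a per-CPU event counter,
 * bumped locklessly on the local CPU and summed across CPUs by a reader.
 *
 *	static DEFINE_PER_CPU(local64_t, evt_count);
 *
 *	static void record_event(void)
 *	{
 *		// get_cpu_ptr() disables preemption around the local op
 *		local64_inc(get_cpu_ptr(&evt_count));
 *		put_cpu_ptr(&evt_count);
 *	}
 *
 *	static s64 total_events(void)
 *	{
 *		s64 sum = 0;
 *		int cpu;
 *
 *		// readers may see slightly stale, but never torn, values
 *		for_each_possible_cpu(cpu)
 *			sum += local64_read(per_cpu_ptr(&evt_count, cpu));
 *		return sum;
 *	}
 */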

/* Implement in terms of local_t on 64-bit, atomic64_t otherwise. */

#if BITS_PER_LONG == 64

#include <asm/local.h>

typedef struct {
	local_t a;
} local64_t;

#define LOCAL64_INIT(i) { LOCAL_INIT(i) }

#define local64_read(l) local_read(&(l)->a)
#define local64_set(l,i) local_set((&(l)->a),(i))
#define local64_inc(l) local_inc(&(l)->a)
#define local64_dec(l) local_dec(&(l)->a)
#define local64_add(i,l) local_add((i),(&(l)->a))
#define local64_sub(i,l) local_sub((i),(&(l)->a))

#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
#define local64_inc_return(l) local_inc_return(&(l)->a)
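
/*
 * Illustrative sketch (seq_ctr is a hypothetical name; the update side is
 * assumed to run with preemption disabled): the _return variants yield the
 * new value atomically wrt IRQs on this CPU, e.g. for IRQ-safe per-CPU
 * sequence numbers.
 *
 *	static DEFINE_PER_CPU(local64_t, seq_ctr);
 *
 *	u64 seq = local64_inc_return(this_cpu_ptr(&seq_ctr));
 */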

static inline s64 local64_cmpxchg(local64_t *l, s64 old, s64 new)
{
	return local_cmpxchg(&l->a, old, new);
}

static inline bool local64_try_cmpxchg(local64_t *l, s64 *old, s64 new)
{
	return local_try_cmpxchg(&l->a, (long *)old, new);
}
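
/*
 * Illustrative sketch (local64_add_capped is a hypothetical helper):
 * local64_try_cmpxchg() returns true on success and updates *old with the
 * current value on failure, so it slots naturally into a retry loop.
 * Here: add to a counter without letting it exceed a maximum.
 *
 *	static void local64_add_capped(local64_t *l, s64 n, s64 max)
 *	{
 *		s64 old = local64_read(l);
 *		s64 new;
 *
 *		do {
 *			new = min(old + n, max);
 *		} while (!local64_try_cmpxchg(l, &old, new));
 *	}
 */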

#define local64_xchg(l, n) local_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a)

/* Non-atomic variants, i.e. preemption disabled and the value won't be
 * touched from interrupt context, etc. Some archs can optimize this
 * case well. */
#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
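
/*
 * Illustrative sketch (bytes_pending is a hypothetical name): the
 * __local64_* forms are plain read-modify-write, so they are only safe
 * with preemption disabled and for counters never touched from IRQ
 * context.
 *
 *	preempt_disable();
 *	__local64_add(nbytes, this_cpu_ptr(&bytes_pending));
 *	preempt_enable();
 */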

#else /* BITS_PER_LONG != 64 */

#include <linux/atomic.h>

/* Don't use a bare typedef: local64_t must not be mixed up with plain
 * atomic64_t. */
typedef struct {
	atomic64_t a;
} local64_t;

#define LOCAL64_INIT(i) { ATOMIC64_INIT(i) }

#define local64_read(l) atomic64_read(&(l)->a)
#define local64_set(l,i) atomic64_set((&(l)->a),(i))
#define local64_inc(l) atomic64_inc(&(l)->a)
#define local64_dec(l) atomic64_dec(&(l)->a)
#define local64_add(i,l) atomic64_add((i),(&(l)->a))
#define local64_sub(i,l) atomic64_sub((i),(&(l)->a))

#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
#define local64_inc_return(l) atomic64_inc_return(&(l)->a)

#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
#define local64_try_cmpxchg(l, po, n) atomic64_try_cmpxchg((&(l)->a), (po), (n))
#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a)

/* Non-atomic variants, i.e. preemption disabled and the value won't be
 * touched from interrupt context, etc. Some archs can optimize this
 * case well. */
#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))

#endif /* BITS_PER_LONG != 64 */

#endif /* _ASM_GENERIC_LOCAL64_H */