/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * atomic64_t for 586+
 *
 * Copyright © 2010 Luca Barbieri
 */

#include <linux/linkage.h>
#include <asm/alternative.h>

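/*
 * read64: atomically load the 64-bit value at (\reg) into %edx:%eax.
 * %ebx/%ecx are copied into %eax/%edx first, so if the compare happens
 * to succeed cmpxchg8b stores back the value that is already there;
 * either way %edx:%eax ends up holding the current contents.
 */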
.macro read64 reg
	movl %ebx, %eax
	movl %ecx, %edx
/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
	LOCK_PREFIX
	cmpxchg8b (\reg)
.endm

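/*
 * atomic64_read_cx8: %ecx holds the atomic64_t pointer;
 * the 64-bit value is returned in %edx:%eax.
 */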
SYM_FUNC_START(atomic64_read_cx8)
	read64 %ecx
	RET
SYM_FUNC_END(atomic64_read_cx8)

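/*
 * atomic64_set_cx8: %esi holds the atomic64_t pointer, %ecx:%ebx the
 * new value.  A failed compare reloads %edx:%eax with the current
 * contents, so the loop terminates as soon as the store goes through.
 */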
SYM_FUNC_START(atomic64_set_cx8)
1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes
 * are atomic on 586 and newer */
	cmpxchg8b (%esi)
	jne 1b

	RET
SYM_FUNC_END(atomic64_set_cx8)

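/*
 * atomic64_xchg_cx8: %esi holds the atomic64_t pointer, %ecx:%ebx the
 * new value; the old value is returned in %edx:%eax.
 */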
SYM_FUNC_START(atomic64_xchg_cx8)
1:
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	RET
SYM_FUNC_END(atomic64_xchg_cx8)

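/*
 * addsub_return: generate atomic64_{add,sub}_return_cx8.
 * %edx:%eax holds the operand, %ecx the atomic64_t pointer;
 * \ins operates on the low half, \insc carries into the high half.
 * The new value is returned in %edx:%eax.
 */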
.macro addsub_return func ins insc
SYM_FUNC_START(atomic64_\func\()_return_cx8)
	pushl %ebp
	pushl %ebx
	pushl %esi
	pushl %edi

	movl %eax, %esi
	movl %edx, %edi
	movl %ecx, %ebp

	read64 %ecx
1:
	movl %eax, %ebx
	movl %edx, %ecx
	\ins\()l %esi, %ebx
	\insc\()l %edi, %ecx
	LOCK_PREFIX
	cmpxchg8b (%ebp)
	jne 1b

10:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %edi
	popl %esi
	popl %ebx
	popl %ebp
	RET
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm

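/* generate atomic64_add_return_cx8 and atomic64_sub_return_cx8 */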
addsub_return add add adc
addsub_return sub sub sbb

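/*
 * incdec_return: generate atomic64_{inc,dec}_return_cx8.
 * %esi holds the atomic64_t pointer; the new value is returned
 * in %edx:%eax.
 */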
.macro incdec_return func ins insc
SYM_FUNC_START(atomic64_\func\()_return_cx8)
	pushl %ebx

	read64 %esi
1:
	movl %eax, %ebx
	movl %edx, %ecx
	\ins\()l $1, %ebx
	\insc\()l $0, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

10:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %ebx
	RET
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm

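/* generate atomic64_inc_return_cx8 and atomic64_dec_return_cx8 */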
incdec_return inc add adc
incdec_return dec sub sbb

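/*
 * atomic64_dec_if_positive_cx8: %esi holds the atomic64_t pointer.
 * The decrement is only stored when the result is not negative;
 * (old value - 1) is returned in %edx:%eax either way.
 */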
SYM_FUNC_START(atomic64_dec_if_positive_cx8)
	pushl %ebx

	read64 %esi
1:
	movl %eax, %ebx
	movl %edx, %ecx
	subl $1, %ebx
	sbb $0, %ecx
	js 2f
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

2:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %ebx
	RET
SYM_FUNC_END(atomic64_dec_if_positive_cx8)

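/*
 * atomic64_add_unless_cx8: %edx:%eax holds the value to add, %esi the
 * atomic64_t pointer, %ecx/%edi the low/high halves of the "unless"
 * value.  Returns 1 in %eax if the add was performed, 0 if the current
 * value already equalled the "unless" value.
 */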
SYM_FUNC_START(atomic64_add_unless_cx8)
	pushl %ebp
	pushl %ebx
/* these just push these two parameters on the stack */
	pushl %edi
	pushl %ecx

	movl %eax, %ebp
	movl %edx, %edi

	read64 %esi
1:
	cmpl %eax, 0(%esp)
	je 4f
2:
	movl %eax, %ebx
	movl %edx, %ecx
	addl %ebp, %ebx
	adcl %edi, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	movl $1, %eax
3:
	addl $8, %esp
	popl %ebx
	popl %ebp
	RET
4:
	cmpl %edx, 4(%esp)
	jne 2b
	xorl %eax, %eax
	jmp 3b
SYM_FUNC_END(atomic64_add_unless_cx8)

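/*
 * atomic64_inc_not_zero_cx8: %esi holds the atomic64_t pointer.
 * Returns 1 in %eax if the value was incremented, 0 if it was zero.
 */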
SYM_FUNC_START(atomic64_inc_not_zero_cx8)
	pushl %ebx

	read64 %esi
1:
	movl %eax, %ecx
	orl %edx, %ecx
	jz 3f
	movl %eax, %ebx
	xorl %ecx, %ecx
	addl $1, %ebx
	adcl %edx, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	movl $1, %eax
3:
	popl %ebx
	RET
SYM_FUNC_END(atomic64_inc_not_zero_cx8)