/* Copyright (C) 2003-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct?  */
#define ATOMIC_EXCHANGE_USES_CAS 1


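/* __MB expands to the Alpha "mb" (memory barrier) instruction on SMP
   builds; uniprocessor (UP) builds need no hardware barrier, so it
   expands to nothing.  */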
#ifdef UP
# define __MB		/* nothing */
#else
# define __MB		" mb\n"
#endif


/* Compare and exchange.  For all of the "xxx" routines, we expect a
   "__prev" and a "__cmp" variable to be provided by the enclosing
   scope, in which values are returned; the "bool" and "val" wrappers
   below supply them.  */

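/* Pre-BWX Alpha has no byte or word memory operations, so the 8- and
   16-bit routines below work on the aligned quadword containing the
   target: "andnot" rounds the address down to 8 bytes,
   "insbl"/"inswl" shift the new value into its byte lane, and
   "extbl"/"extwl" plus "mskbl"/"mskwl" extract and clear that lane
   inside the ldq_l/stq_c retry loop.  */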
#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \
({ \
  unsigned long __tmp, __snew, __addr64; \
  __asm__ __volatile__ ( \
    mb1 \
    "   andnot  %[__addr8],7,%[__addr64]\n" \
    "   insbl   %[__new],%[__addr8],%[__snew]\n" \
    "1: ldq_l   %[__tmp],0(%[__addr64])\n" \
    "   extbl   %[__tmp],%[__addr8],%[__prev]\n" \
    "   cmpeq   %[__prev],%[__old],%[__cmp]\n" \
    "   beq     %[__cmp],2f\n" \
    "   mskbl   %[__tmp],%[__addr8],%[__tmp]\n" \
    "   or      %[__snew],%[__tmp],%[__tmp]\n" \
    "   stq_c   %[__tmp],0(%[__addr64])\n" \
    "   beq     %[__tmp],1b\n" \
    mb2 \
    "2:" \
    : [__prev] "=&r" (__prev), \
      [__snew] "=&r" (__snew), \
      [__tmp] "=&r" (__tmp), \
      [__cmp] "=&r" (__cmp), \
      [__addr64] "=&r" (__addr64) \
    : [__addr8] "r" (mem), \
      [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)), \
      [__new] "r" (new) \
    : "memory"); \
})

#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
({ \
  unsigned long __tmp, __snew, __addr64; \
  __asm__ __volatile__ ( \
    mb1 \
    "   andnot  %[__addr16],7,%[__addr64]\n" \
    "   inswl   %[__new],%[__addr16],%[__snew]\n" \
    "1: ldq_l   %[__tmp],0(%[__addr64])\n" \
    "   extwl   %[__tmp],%[__addr16],%[__prev]\n" \
    "   cmpeq   %[__prev],%[__old],%[__cmp]\n" \
    "   beq     %[__cmp],2f\n" \
    "   mskwl   %[__tmp],%[__addr16],%[__tmp]\n" \
    "   or      %[__snew],%[__tmp],%[__tmp]\n" \
    "   stq_c   %[__tmp],0(%[__addr64])\n" \
    "   beq     %[__tmp],1b\n" \
    mb2 \
    "2:" \
    : [__prev] "=&r" (__prev), \
      [__snew] "=&r" (__snew), \
      [__tmp] "=&r" (__tmp), \
      [__cmp] "=&r" (__cmp), \
      [__addr64] "=&r" (__addr64) \
    : [__addr16] "r" (mem), \
      [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)), \
      [__new] "r" (new) \
    : "memory"); \
})

#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
({ \
  __asm__ __volatile__ ( \
    mb1 \
    "1: ldl_l   %[__prev],%[__mem]\n" \
    "   cmpeq   %[__prev],%[__old],%[__cmp]\n" \
    "   beq     %[__cmp],2f\n" \
    "   mov     %[__new],%[__cmp]\n" \
    "   stl_c   %[__cmp],%[__mem]\n" \
    "   beq     %[__cmp],1b\n" \
    mb2 \
    "2:" \
    : [__prev] "=&r" (__prev), \
      [__cmp] "=&r" (__cmp) \
    : [__mem] "m" (*(mem)), \
      [__old] "Ir" ((uint64_t)(atomic32_t)(uint64_t)(old)), \
      [__new] "Ir" (new) \
    : "memory"); \
})

#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
({ \
  __asm__ __volatile__ ( \
    mb1 \
    "1: ldq_l   %[__prev],%[__mem]\n" \
    "   cmpeq   %[__prev],%[__old],%[__cmp]\n" \
    "   beq     %[__cmp],2f\n" \
    "   mov     %[__new],%[__cmp]\n" \
    "   stq_c   %[__cmp],%[__mem]\n" \
    "   beq     %[__cmp],1b\n" \
    mb2 \
    "2:" \
    : [__prev] "=&r" (__prev), \
      [__cmp] "=&r" (__cmp) \
    : [__mem] "m" (*(mem)), \
      [__old] "Ir" ((uint64_t)(old)), \
      [__new] "Ir" (new) \
    : "memory"); \
})

/* For all "bool" routines, we return FALSE if the exchange was
   successful.  */

#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \
   !__cmp; })

/* For all "val" routines, return the old value whether the exchange
   was successful or not.  */

#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \
   (typeof (*mem))__prev; })

/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */

#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
                        mem, new, old, "", __MB)

#define atomic_compare_and_exchange_val_acq(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
                       mem, new, old, "", __MB)

/* Compare and exchange with "release" semantics, i.e. barrier before.  */

#define atomic_compare_and_exchange_val_rel(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
                       mem, new, old, __MB, "")


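/* Usage sketch (illustrative only, not part of this header): the
   "val" form returns the previous value, so given "int *mem" a
   caller typically retries until the value it read is the value it
   replaced, e.g. an atomic increment:

     int old, seen = *mem;
     do
       {
         old = seen;
         seen = atomic_compare_and_exchange_val_acq (mem, old + 1, old);
       }
     while (seen != old);
*/
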
/* Atomically store value and return the previous value.  */

#define __arch_exchange_8_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
    mb1 \
    "   andnot  %[__addr8],7,%[__addr64]\n" \
    "   insbl   %[__value],%[__addr8],%[__sval]\n" \
    "1: ldq_l   %[__tmp],0(%[__addr64])\n" \
    "   extbl   %[__tmp],%[__addr8],%[__ret]\n" \
    "   mskbl   %[__tmp],%[__addr8],%[__tmp]\n" \
    "   or      %[__sval],%[__tmp],%[__tmp]\n" \
    "   stq_c   %[__tmp],0(%[__addr64])\n" \
    "   beq     %[__tmp],1b\n" \
    mb2 \
    : [__ret] "=&r" (__ret), \
      [__sval] "=&r" (__sval), \
      [__tmp] "=&r" (__tmp), \
      [__addr64] "=&r" (__addr64) \
    : [__addr8] "r" (mem), \
      [__value] "r" (value) \
    : "memory"); \
  __ret; })

#define __arch_exchange_16_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
    mb1 \
    "   andnot  %[__addr16],7,%[__addr64]\n" \
    "   inswl   %[__value],%[__addr16],%[__sval]\n" \
    "1: ldq_l   %[__tmp],0(%[__addr64])\n" \
    "   extwl   %[__tmp],%[__addr16],%[__ret]\n" \
    "   mskwl   %[__tmp],%[__addr16],%[__tmp]\n" \
    "   or      %[__sval],%[__tmp],%[__tmp]\n" \
    "   stq_c   %[__tmp],0(%[__addr64])\n" \
    "   beq     %[__tmp],1b\n" \
    mb2 \
    : [__ret] "=&r" (__ret), \
      [__sval] "=&r" (__sval), \
      [__tmp] "=&r" (__tmp), \
      [__addr64] "=&r" (__addr64) \
    : [__addr16] "r" (mem), \
      [__value] "r" (value) \
    : "memory"); \
  __ret; })

#define __arch_exchange_32_int(mem, value, mb1, mb2) \
({ \
  signed int __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
    mb1 \
    "1: ldl_l   %[__ret],%[__mem]\n" \
    "   mov     %[__val],%[__tmp]\n" \
    "   stl_c   %[__tmp],%[__mem]\n" \
    "   beq     %[__tmp],1b\n" \
    mb2 \
    : [__ret] "=&r" (__ret), \
      [__tmp] "=&r" (__tmp) \
    : [__mem] "m" (*(mem)), \
      [__val] "Ir" (value) \
    : "memory"); \
  __ret; })

#define __arch_exchange_64_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
    mb1 \
    "1: ldq_l   %[__ret],%[__mem]\n" \
    "   mov     %[__val],%[__tmp]\n" \
    "   stq_c   %[__tmp],%[__mem]\n" \
    "   beq     %[__tmp],1b\n" \
    mb2 \
    : [__ret] "=&r" (__ret), \
      [__tmp] "=&r" (__tmp) \
    : [__mem] "m" (*(mem)), \
      [__val] "Ir" (value) \
    : "memory"); \
  __ret; })

#define atomic_exchange_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB)

#define atomic_exchange_rel(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "")


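/* Usage sketch (illustrative only): atomic_exchange_acq is the
   classic test-and-set primitive; a minimal spinlock on top of it,
   where "lock" is a hypothetical int flag, 0 = free, 1 = held:

     while (atomic_exchange_acq (&lock, 1) != 0)
       ;
     ... critical section ...
     atomic_exchange_rel (&lock, 0);
*/
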
/* Atomically add value and return the previous (unincremented) value.  */

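/* The 8- and 16-bit variants are intentionally unimplemented; they
   trap at run time if ever selected by the bysize dispatcher.  */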
#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2) \
({ \
  signed int __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
    mb1 \
    "1: ldl_l   %[__ret],%[__mem]\n" \
    "   addl    %[__ret],%[__val],%[__tmp]\n" \
    "   stl_c   %[__tmp],%[__mem]\n" \
    "   beq     %[__tmp],1b\n" \
    mb2 \
    : [__ret] "=&r" (__ret), \
      [__tmp] "=&r" (__tmp) \
    : [__mem] "m" (*(mem)), \
      [__val] "Ir" ((signed int)(value)) \
    : "memory"); \
  __ret; })

#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2) \
({ \
  unsigned long __tmp; __typeof(*mem) __ret; \
  __asm__ __volatile__ ( \
    mb1 \
    "1: ldq_l   %[__ret],%[__mem]\n" \
    "   addq    %[__ret],%[__val],%[__tmp]\n" \
    "   stq_c   %[__tmp],%[__mem]\n" \
    "   beq     %[__tmp],1b\n" \
    mb2 \
    : [__ret] "=&r" (__ret), \
      [__tmp] "=&r" (__tmp) \
    : [__mem] "m" (*(mem)), \
      [__val] "Ir" ((unsigned long)(value)) \
    : "memory"); \
  __ret; })

/* ??? Barrier semantics for atomic_exchange_and_add appear to be
   undefined.  Use a full barrier for now, as that's safe.  */
#define atomic_exchange_and_add(mem, value) \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB)


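/* Usage sketch (illustrative only): atomic_exchange_and_add is a
   fetch-and-add returning the pre-addition value, e.g. dropping a
   reference, where "refcount" and "destroy_object" are hypothetical:

     if (atomic_exchange_and_add (&refcount, -1) == 1)
       destroy_object ();	// that was the last reference
*/
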
/* ??? Not implemented yet.  A direct ldq_l/stq_c sequence could do
   better than the compare-and-exchange loop provided by generic code.

#define atomic_decrement_if_positive(mem)
#define atomic_bit_test_set(mem, bit)

*/

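/* "mb" orders all prior memory operations before all later ones;
   "wmb" orders only writes.  On UP builds these stay undefined, so
   the generic compiler-only barrier fallbacks apply.  */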
#ifndef UP
# define atomic_full_barrier()	__asm ("mb" : : : "memory")
# define atomic_read_barrier()	__asm ("mb" : : : "memory")
# define atomic_write_barrier()	__asm ("wmb" : : : "memory")
#endif