/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop() asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */

#ifdef __mb
#define mb() do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_mb
#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif
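
/*
 * Illustrative sketch (not part of the original header): an architecture
 * opting in to the instrumented wrappers above defines only the __
 * prefixed primitive in its own asm/barrier.h, for example:
 *
 *	#define __mb()	asm volatile ("fence" ::: "memory")
 *
 * The "fence" mnemonic is a stand-in; the real instruction is
 * architecture specific. With __mb defined, the generic code above
 * expands mb() to kcsan_mb(); __mb(); so KCSAN sees the barrier.
 */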

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb() barrier()
#endif

#ifndef rmb
#define rmb() mb()
#endif

#ifndef wmb
#define wmb() mb()
#endif

#ifndef dma_mb
#define dma_mb() mb()
#endif

#ifndef dma_rmb
#define dma_rmb() rmb()
#endif

#ifndef dma_wmb
#define dma_wmb() wmb()
#endif
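
/*
 * Illustrative sketch of dma_wmb() usage (hypothetical driver code, not
 * part of the original header): a driver handing a descriptor to a
 * device must make the descriptor contents visible before the store
 * that transfers ownership:
 *
 *	desc->addr = buf_dma;			// fill descriptor fields
 *	desc->len  = len;
 *	dma_wmb();				// fields visible before ...
 *	desc->flags = DESC_OWNED_BY_HW;		// ... ownership transfer
 *
 * desc, buf_dma, len and DESC_OWNED_BY_HW are made-up names used only
 * for illustration; dma_rmb() pairs with this on the read side.
 */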

#ifndef __smp_mb
#define __smp_mb() mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb() rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb() wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else /* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb() barrier()
#endif

#ifndef smp_rmb
#define smp_rmb() barrier()
#endif

#ifndef smp_wmb
#define smp_wmb() barrier()
#endif

#endif /* CONFIG_SMP */
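
/*
 * Illustrative sketch of smp_wmb()/smp_rmb() pairing (hypothetical code,
 * not part of the original header): the classic message-passing pattern,
 * with made-up variables data and flag:
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(data, 42);		if (READ_ONCE(flag)) {
 *	smp_wmb();				smp_rmb();
 *	WRITE_ONCE(flag, 1);			r = READ_ONCE(data);
 *					}
 *
 * With both barriers in place, a reader that observes flag == 1 is
 * guaranteed to observe data == 42; dropping either barrier breaks
 * the guarantee.
 */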

#ifndef __smp_store_mb
#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic() __smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic() __smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	__smp_mb(); \
	WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	__smp_mb(); \
	(typeof(*p))___p1; \
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else /* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	(typeof(*p))___p1; \
})
#endif

#endif /* CONFIG_SMP */
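
/*
 * Illustrative sketch of smp_store_release()/smp_load_acquire() pairing
 * (hypothetical code, not part of the original header): the same
 * message-passing pattern as with plain barriers, but expressed with
 * the one-sided primitives defined above:
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(data, 42);		if (smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);		r = READ_ONCE(data);
 *
 * The release orders the data store before the flag store; the acquire
 * orders the flag load before the data load. data and flag are made-up
 * variables used only for illustration.
 */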

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
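
/*
 * Illustrative note (hypothetical code, not part of the original
 * header): a UP guest still needs SMP-strength ordering against a host
 * running on other CPUs, which is what the virt_*() variants provide
 * unconditionally. A ring-buffer-style publish to the host might look
 * like:
 *
 *	ring->slot[idx] = d;		// fill the slot (made-up names)
 *	virt_wmb();			// slot visible before ...
 *	ring->avail_idx = idx + 1;	// ... publishing it to the host
 */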

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep() smp_rmb()
#endif
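
/*
 * Illustrative sketch (hypothetical code, not part of the original
 * header): upgrading a control dependency to ACQUIRE ordering:
 *
 *	while (!READ_ONCE(x->done))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();	// add LOAD->LOAD ordering
 *	r = x->result;			// cannot be loaded early now
 *
 * The branch on x->done already orders subsequent stores; the barrier
 * adds the missing load ordering. x, done and result are made-up names
 * used only for illustration.
 */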

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Because C lacks lambda expressions, we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
	typeof(ptr) __PTR = (ptr); \
	__unqual_scalar_typeof(*ptr) VAL; \
	for (;;) { \
		VAL = READ_ONCE(*__PTR); \
		if (cond_expr) \
			break; \
		cpu_relax(); \
	} \
	(typeof(*ptr))VAL; \
})
#endif
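
/*
 * Illustrative usage (hypothetical, not part of the original header):
 * spin until a made-up counter becomes nonzero, with no ordering
 * guarantees:
 *
 *	cur = smp_cond_load_relaxed(&ctr, VAL != 0);
 *
 * Inside the condition expression, VAL names the freshly loaded value
 * of *ptr, so the test re-evaluates against each new load.
 */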

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
	__unqual_scalar_typeof(*ptr) _val; \
	_val = smp_cond_load_relaxed(ptr, cond_expr); \
	smp_acquire__after_ctrl_dep(); \
	(typeof(*ptr))_val; \
})
#endif
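
/*
 * Illustrative usage (hypothetical, not part of the original header):
 * wait for a made-up owner field to clear before entering a critical
 * section; the ACQUIRE ordering keeps the critical section from being
 * reordered before the wait:
 *
 *	smp_cond_load_acquire(&lock->owner, VAL == 0);
 *	// critical section: ordered after the final owner load
 */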

/*
 * pmem_wmb() ensures that all stores committed to persistent storage
 * by preceding instructions have updated persistent storage before any
 * data access or data transfer caused by subsequent instructions is
 * initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb() wmb()
#endif
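
/*
 * Illustrative sketch of pmem_wmb() usage (hypothetical driver code,
 * not part of the original header): fence data writes to pmem before
 * writing a commit record that makes them observable after a crash:
 *
 *	memcpy_flushcache(pmem_dst, src, len);	// push data to pmem
 *	pmem_wmb();				// data durable before ...
 *	memcpy_flushcache(pmem_commit, &rec, sizeof(rec));	// ... commit
 *
 * pmem_dst, pmem_commit, src, len and rec are made-up names used only
 * for illustration.
 */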

/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes.
 * For this kind of memory access, the CPU may wait for prior accesses to
 * be merged with subsequent ones. In some situations this wait hurts
 * performance. io_stop_wc() can be used to prevent the merging of
 * write-combining memory accesses before this macro with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif
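
/*
 * Illustrative sketch of io_stop_wc() usage (hypothetical code, not
 * part of the original header): two writes to a write-combining
 * mapping that must not be merged into one burst:
 *
 *	writel_relaxed(v1, wc_base + REG_A);	// made-up registers
 *	io_stop_wc();				// don't combine with ...
 *	writel_relaxed(v2, wc_base + REG_B);	// ... this write
 */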

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */