/*
 * Copyright (C) 2007-2008, 2010, 2012-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef Atomics_h
#define Atomics_h

#include <atomic>
#include <wtf/StdLibExtras.h>

#if OS(WINDOWS)
#if !COMPILER(GCC_OR_CLANG)
extern "C" void _ReadWriteBarrier(void);
#pragma intrinsic(_ReadWriteBarrier)
#endif
#include <windows.h>
#endif

namespace WTF {

// Atomic wraps std::atomic with the sole purpose of making the compare_exchange
// operations not alter the expected value. This is more in line with how we
// typically use CAS in our code.
//
// Atomic is a struct without explicitly defined constructors so that it can be
// initialized at compile time.

template<typename T>
struct Atomic {
    // Don't pass a non-default value for the order parameter unless you really know
    // what you are doing and have thought about it very hard. The performance you
    // might gain over seq_cst is usually not large enough to justify the risk.

    T load(std::memory_order order = std::memory_order_seq_cst) const { return value.load(order); }

    void store(T desired, std::memory_order order = std::memory_order_seq_cst) { value.store(desired, order); }

    bool compareExchangeWeak(T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
    {
#if OS(WINDOWS)
        // Windows makes strange assertions about the argument to compare_exchange_weak, and anyway,
        // Windows is X86 so seq_cst is cheap.
        order = std::memory_order_seq_cst;
#endif
        T expectedOrActual = expected;
        return value.compare_exchange_weak(expectedOrActual, desired, order);
    }

    bool compareExchangeStrong(T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
    {
#if OS(WINDOWS)
        // See above.
        order = std::memory_order_seq_cst;
#endif
        T expectedOrActual = expected;
        return value.compare_exchange_strong(expectedOrActual, desired, order);
    }

    std::atomic<T> value;
};
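
// Example (illustrative): a CAS loop over a hypothetical counter. Unlike
// std::atomic::compare_exchange_weak, compareExchangeWeak() does not write the
// actual value back into 'expected' on failure, so the loop reloads explicitly:
//
//     Atomic<unsigned> counter = { 0 };
//     for (;;) {
//         unsigned oldValue = counter.load();
//         if (counter.compareExchangeWeak(oldValue, oldValue + 1))
//             break;
//     }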

// This is a weak CAS function that takes a direct pointer and has no portable fencing guarantees.
template<typename T>
inline bool weakCompareAndSwap(volatile T* location, T expected, T newValue)
{
    return bitwise_cast<Atomic<T>*>(location)->compareExchangeWeak(expected, newValue, std::memory_order_relaxed);
}
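
// Because the CAS is relaxed and may fail spuriously, callers retry in a loop
// and supply any fencing themselves. Example (illustrative), with a hypothetical
// spinlock word:
//
//     static unsigned lockWord; // 0 = unlocked, 1 = locked
//     while (!weakCompareAndSwap(&lockWord, 0u, 1u)) { }
//     memoryBarrierAfterLock(); // defined below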

// Just a compiler fence. Has no effect on the hardware, but tells the compiler
// not to move things around this call. Should not affect the compiler's ability
// to do things like register allocation and code motion over pure operations.
inline void compilerFence()
{
#if OS(WINDOWS) && !COMPILER(GCC_OR_CLANG)
    _ReadWriteBarrier();
#else
    asm volatile("" ::: "memory");
#endif
}
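
// Example (illustrative): a compiler fence is enough to order stores observed by
// a signal handler on the same thread, much like std::atomic_signal_fence. The
// names below are hypothetical:
//
//     result = computeResult();
//     compilerFence(); // don't let the compiler sink the store below the flag
//     resultIsReady = true; // read by a signal handler on this thread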

#if CPU(ARM_THUMB2) || CPU(ARM64)

// Full memory fence. No accesses will float above this, and no accesses will sink
// below it. ("dmb" is spelled the same way in Thumb-2 and A64, so despite their
// names these helpers serve ARM64 as well.)
inline void armV7_dmb()
{
    asm volatile("dmb sy" ::: "memory");
}

// Like the above, but only affects stores.
inline void armV7_dmb_st()
{
    asm volatile("dmb st" ::: "memory");
}

inline void loadLoadFence() { armV7_dmb(); }
inline void loadStoreFence() { armV7_dmb(); }
inline void storeLoadFence() { armV7_dmb(); }
inline void storeStoreFence() { armV7_dmb_st(); }
inline void memoryBarrierAfterLock() { armV7_dmb(); }
inline void memoryBarrierBeforeUnlock() { armV7_dmb(); }

#elif CPU(X86) || CPU(X86_64)

inline void x86_mfence()
{
#if OS(WINDOWS)
    // According to MSDN, MemoryBarrier() expands to a dummy interlocked operation
    // rather than an 'mfence' instruction. That is equivalent for our purposes,
    // but it would be worth investigating which of the two is actually faster.
    MemoryBarrier();
#else
    asm volatile("mfence" ::: "memory");
#endif
}

inline void loadLoadFence() { compilerFence(); }
inline void loadStoreFence() { compilerFence(); }
inline void storeLoadFence() { x86_mfence(); }
inline void storeStoreFence() { compilerFence(); }
inline void memoryBarrierAfterLock() { compilerFence(); }
inline void memoryBarrierBeforeUnlock() { compilerFence(); }
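
// On x86's TSO memory model the only hardware reordering is a load passing an
// earlier store to a different location, so storeLoadFence() is the only fence
// here that must emit an instruction. Example (illustrative), Dekker-style flags
// (hypothetical names) that each thread sets before checking the other's:
//
//     flagA = true;
//     storeLoadFence(); // without this, the load could be satisfied before the
//                       // store drains from the store buffer
//     if (!flagB) { /* enter critical section */ }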

#else

// We know nothing about this architecture's memory model, so fall back on the
// strongest fence the standard library offers; a compiler-only fence would give
// no hardware ordering at all on a weakly ordered CPU.
inline void loadLoadFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
inline void loadStoreFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
inline void storeLoadFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
inline void storeStoreFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
inline void memoryBarrierAfterLock() { std::atomic_thread_fence(std::memory_order_seq_cst); }
inline void memoryBarrierBeforeUnlock() { std::atomic_thread_fence(std::memory_order_seq_cst); }

#endif
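
// Example (illustrative): the classic message-passing pattern, with the producer
// publishing 'data' before 'ready' and the consumer reading them in the opposite
// order (both names hypothetical):
//
//     // Producer:
//     data = 42;
//     storeStoreFence();
//     ready = true;
//
//     // Consumer:
//     if (ready) {
//         loadLoadFence();
//         use(data);
//     }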

} // namespace WTF

using WTF::Atomic;

#endif // Atomics_h