/root/doris/be/src/gutil/atomicops.h
| Line | Count | Source | 
| 1 |  | // Copyright 2003 Google Inc. | 
| 2 |  | // All Rights Reserved. | 
| 3 |  | // | 
| 4 |  |  | 
| 5 |  | // For atomic operations on statistics counters, see atomic_stats_counter.h. | 
| 6 |  | // For atomic operations on sequence numbers, see atomic_sequence_num.h. | 
| 7 |  | // For atomic operations on reference counts, see atomic_refcount.h. | 
| 8 |  |  | 
| 9 |  | // Some fast atomic operations -- typically with machine-dependent | 
| 10 |  | // implementations.  This file may need editing as Google code is | 
| 11 |  | // ported to different architectures. | 
| 12 |  |  | 
| 13 |  | // The routines exported by this module are subtle.  If you use them, even if | 
| 14 |  | // you get the code right, it will depend on careful reasoning about atomicity | 
| 15 |  | // and memory ordering; it will be less readable, and harder to maintain.  If | 
| 16 |  | // you plan to use these routines, you should have a good reason, such as solid | 
| 17 |  | // evidence that performance would otherwise suffer, or there being no | 
| 18 |  | // alternative.  You should assume only properties explicitly guaranteed by the | 
| 19 |  | // specifications in this file.  You are almost certainly _not_ writing code | 
| 20 |  | // just for the x86; if you assume x86 semantics, x86 hardware bugs and | 
| 21 |  | // implementations on other architectures will cause your code to break.  If you | 
| 22 |  | // do not know what you are doing, avoid these routines, and use a Mutex. | 
| 23 |  | // | 
| 24 |  | // The following lower-level operations are typically useful only to people | 
| 25 |  | // implementing higher-level synchronization operations like spinlocks, | 
| 26 |  | // mutexes, and condition-variables.  They combine CompareAndSwap(), | 
| 27 |  | // addition, exchange, a load, or a store with appropriate memory-ordering | 
| 28 |  | // instructions.  "Acquire" operations ensure that no later memory access by | 
| 29 |  | // the same thread can be reordered ahead of the operation.  "Release" | 
| 30 |  | // operations ensure that no previous memory access by the same thread can be | 
| 31 |  | // reordered after the operation.  "Barrier" operations have both "Acquire" and | 
| 32 |  | // "Release" semantics.  A MemoryBarrier() has "Barrier" semantics, but does no | 
| 33 |  | // memory access.  "NoBarrier" operations have no barrier:  the CPU is | 
| 34 |  | // permitted to reorder them freely (as seen by other threads), even in ways | 
| 35 |  | // that appear to violate functional dependence, just as it can for any normal | 
| 36 |  | // variable access. | 
| 37 |  | // | 
| 38 |  | // It is incorrect to make direct assignments to/from an atomic variable. | 
| 39 |  | // You should use one of the Load or Store routines.  The NoBarrier | 
| 40 |  | // versions are provided when no barriers are needed: | 
| 41 |  | //   NoBarrier_Store() | 
| 42 |  | //   NoBarrier_Load() | 
| 43 |  | // Although there is currently no compiler enforcement, you are encouraged | 
| 44 |  | // to use these.  Moreover, if you choose to use the base::subtle::Atomic64 type, | 
| 45 |  | // you MUST use one of the Load or Store routines to get correct behavior | 
| 46 |  | // on 32-bit platforms. | 
| 47 |  | // | 
| 48 |  | // The intent is eventually to put all of these routines in namespace | 
| 49 |  | // base::subtle | 
| 50 |  |  | 
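As a minimal sketch of the acquire/release contract the comment above describes (assuming the `Atomic32` type and the `base::subtle` routines this header pulls in): a producer writes data with ordinary stores and then publishes a flag with Release semantics, and a consumer that observes the flag with Acquire semantics is guaranteed to see the data.

```cpp
// Sketch only: message passing with release/acquire ordering, built on the
// routines declared in this header.
#include "gutil/atomicops.h"

static int g_payload = 0;    // plain data, guarded by the flag below
static Atomic32 g_ready = 0; // 0 = not published, 1 = published

void Producer() {
    g_payload = 42; // (1) ordinary write
    // Release_Store prevents (1) from being reordered after the flag write,
    // so any thread that sees g_ready == 1 also sees g_payload == 42.
    base::subtle::Release_Store(&g_ready, 1);
}

void Consumer() {
    // Acquire_Load prevents the payload read below from being reordered
    // ahead of the flag read.
    while (base::subtle::Acquire_Load(&g_ready) == 0) {
        // spin (a real program would yield or back off)
    }
    int v = g_payload; // guaranteed to observe 42
    (void)v;
}
```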
| 51 |  | #pragma once | 
| 52 |  |  | 
| 53 |  | #include <stdint.h> | 
| 54 |  |  | 
| 55 |  | // ------------------------------------------------------------------------ | 
| 56 |  | // Include the platform specific implementations of the types | 
| 57 |  | // and operations listed below.  Implementations are to provide Atomic32 | 
| 58 |  | // and Atomic64 operations. If there is a mismatch between intptr_t and | 
| 59 |  | // the Atomic32 or Atomic64 types for a platform, the platform-specific header | 
| 60 |  | // should define the macro AtomicWordCastType in a clause similar to the | 
| 61 |  | // following: | 
| 62 |  | // #if ...pointers are 64 bits... | 
| 63 |  | // # define AtomicWordCastType base::subtle::Atomic64 | 
| 64 |  | // #else | 
| 65 |  | // # define AtomicWordCastType Atomic32 | 
| 66 |  | // #endif | 
| 67 |  | // ------------------------------------------------------------------------ | 
| 68 |  |  | 
| 69 |  | #define GUTILS_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) | 
| 70 |  |  | 
| 71 |  | #define GUTILS_CLANG_VERSION \ | 
| 72 |  |     (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) | 
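As a worked example of the encoding: GCC 4.7.0 encodes as 4*10000 + 7*100 + 0 = 40700, and Clang 3.4.0 as 3*10000 + 4*100 + 0 = 30400, which are exactly the thresholds tested in the include dispatch below.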
| 73 |  |  | 
| 74 |  | // ThreadSanitizer provides its own implementation of atomicops. | 
| 75 |  | #if defined(THREAD_SANITIZER) | 
| 76 |  | #include "gutil/atomicops-internals-tsan.h" // IWYU pragma: export | 
| 77 |  | #elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__)) | 
| 78 |  | #include "gutil/atomicops-internals-x86.h" // IWYU pragma: export | 
| 79 |  | #elif defined(__GNUC__) && GUTILS_GCC_VERSION >= 40700 | 
| 80 |  | #include "gutil/atomicops-internals-gcc.h" // IWYU pragma: export | 
| 81 |  | #elif defined(__clang__) && GUTILS_CLANG_VERSION >= 30400 | 
| 82 |  | #include "gutil/atomicops-internals-gcc.h" // IWYU pragma: export | 
| 83 |  | #else | 
| 84 |  | #error You need to implement atomic operations for this architecture | 
| 85 |  | #endif | 
| 86 |  |  | 
| 87 |  | // Signed type that can hold a pointer and supports the atomic ops below, as | 
| 88 |  | // well as atomic loads and stores.  Instances must be naturally-aligned. | 
| 89 |  | typedef intptr_t AtomicWord; | 
| 90 |  |  | 
| 91 |  | #ifdef AtomicWordCastType | 
| 92 |  | // ------------------------------------------------------------------------ | 
| 93 |  | // This section is needed only when explicit type casting is required to | 
| 94 |  | // cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32). | 
| 95 |  | // It also serves to document the AtomicWord interface. | 
| 96 |  | // ------------------------------------------------------------------------ | 
| 97 |  |  | 
| 98 |  | namespace base { | 
| 99 |  | namespace subtle { | 
| 100 |  |  | 
| 101 |  | // Atomically execute: | 
| 102 |  | //      result = *ptr; | 
| 103 |  | //      if (*ptr == old_value) | 
| 104 |  | //        *ptr = new_value; | 
| 105 |  | //      return result; | 
| 106 |  | // | 
| 107 |  | // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". | 
| 108 |  | // Always return the old value of "*ptr". | 
| 109 |  | // | 
| 110 |  | // This routine implies no memory barriers. | 
| 111 |  | inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, | 
| 112 |  |                                            AtomicWord new_value) { | 
| 113 |  |     return NoBarrier_CompareAndSwap(reinterpret_cast<volatile AtomicWordCastType*>(ptr), old_value, | 
| 114 |  |                                     new_value); | 
| 115 |  | } | 
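The typical use of a NoBarrier CAS is a retry loop. The helper below is a hypothetical illustration (not part of this header) that keeps a running maximum without locks; NoBarrier suffices because the stored value is the only data being communicated, and it assumes the `base::subtle` overload resolves for `AtomicWord` on this platform.

```cpp
// Hypothetical helper: lock-free running maximum via the CAS retry pattern.
inline void AtomicStoreMax(volatile AtomicWord* ptr, AtomicWord candidate) {
    AtomicWord current = base::subtle::NoBarrier_Load(ptr);
    while (current < candidate) {
        // If *ptr still holds 'current', replace it with 'candidate';
        // otherwise learn the value that beat us and retry.
        AtomicWord observed =
                base::subtle::NoBarrier_CompareAndSwap(ptr, current, candidate);
        if (observed == current) break; // CAS succeeded
        current = observed;             // CAS lost the race; retry
    }
}
```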
| 116 |  |  | 
| 117 |  | // Atomically store new_value into *ptr, returning the previous value held in | 
| 118 |  | // *ptr.  This routine implies no memory barriers. | 
| 119 |  | inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, AtomicWord new_value) { | 
| 120 |  |     return NoBarrier_AtomicExchange(reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value); | 
| 121 |  | } | 
| 122 |  |  | 
| 123 |  | inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr, AtomicWord new_value) { | 
| 124 |  |     return Acquire_AtomicExchange(reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value); | 
| 125 |  | } | 
| 126 |  |  | 
| 127 |  | inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr, AtomicWord new_value) { | 
| 128 |  |     return Release_AtomicExchange(reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value); | 
| 129 |  | } | 
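A classic application of the Acquire/Release exchange pair is a test-and-set spinlock. The sketch below uses hypothetical names (not part of this header): locking uses Acquire semantics so the critical section cannot float above the exchange, and unlocking uses Release semantics so it cannot sink below the store.

```cpp
// Hypothetical spinlock sketch built on Acquire_AtomicExchange and
// Release_Store; names are illustrative only.
class SpinLock {
public:
    void Lock() {
        // Swap in 1 with Acquire semantics: accesses inside the critical
        // section cannot be reordered before this exchange.
        while (base::subtle::Acquire_AtomicExchange(&state_, 1) != 0) {
            // spin until the previous holder stores 0
        }
    }
    void Unlock() {
        // Store 0 with Release semantics: writes made while holding the
        // lock become visible before the lock appears free.
        base::subtle::Release_Store(&state_, 0);
    }

private:
    Atomic32 state_ = 0; // 0 = unlocked, 1 = held
};
```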
| 130 |  |  | 
| 131 |  | // Atomically increment *ptr by "increment".  Returns the new value of | 
| 132 |  | // *ptr with the increment applied.  This routine implies no memory | 
| 133 |  | // barriers. | 
| 134 |  | inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr, AtomicWord increment) { | 
| 135 |  |     return NoBarrier_AtomicIncrement(reinterpret_cast<volatile AtomicWordCastType*>(ptr), | 
| 136 |  |                                      increment); | 
| 137 |  | } | 
| 138 |  |  | 
| 139 |  | inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr, AtomicWord increment) { | 
| 140 |  |     return Barrier_AtomicIncrement(reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment); | 
| 141 |  | } | 
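As an example of when the Barrier increment matters, consider reference counting, the pattern atomic_refcount.h builds on: taking a reference needs no ordering, but the decrement that may reach zero must be a full barrier so earlier writes to the object are visible before any thread destroys it. A hedged sketch, with `Destroy` as a hypothetical callback:

```cpp
// Sketch of the refcount pattern; Destroy is a hypothetical callback.
void Ref(volatile AtomicWord* refcount) {
    // Taking a reference imposes no ordering requirement.
    base::subtle::NoBarrier_AtomicIncrement(refcount, 1);
}

void Unref(volatile AtomicWord* refcount, void (*Destroy)()) {
    // The decrement must be a barrier: all prior writes to the object must
    // be visible before the count can be observed hitting zero.
    if (base::subtle::Barrier_AtomicIncrement(refcount, -1) == 0) {
        Destroy();
    }
}
```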
| 142 |  |  | 
| 143 |  | inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, | 
| 144 |  |                                          AtomicWord new_value) { | 
| 145 |  |     return base::subtle::Acquire_CompareAndSwap(reinterpret_cast<volatile AtomicWordCastType*>(ptr), | 
| 146 |  |                                                 old_value, new_value); | 
| 147 |  | } | 
| 148 |  |  | 
| 149 |  | inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, | 
| 150 |  |                                          AtomicWord new_value) { | 
| 151 |  |     return base::subtle::Release_CompareAndSwap(reinterpret_cast<volatile AtomicWordCastType*>(ptr), | 
| 152 |  |                                                 old_value, new_value); | 
| 153 |  | } | 
| 154 |  |  | 
| 155 |  | inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) { | 
| 156 |  |     NoBarrier_Store(reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); | 
| 157 |  | } | 
| 158 |  |  | 
| 159 |  | inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { | 
| 160 |  |     return base::subtle::Acquire_Store(reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); | 
| 161 |  | } | 
| 162 |  |  | 
| 163 |  | inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { | 
| 164 |  |     return base::subtle::Release_Store(reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); | 
| 165 |  | } | 
| 166 |  |  | 
| 167 |  | inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) { | 
| 168 |  |     return NoBarrier_Load(reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); | 
| 169 |  | } | 
| 170 |  |  | 
| 171 |  | inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { | 
| 172 |  |     return base::subtle::Acquire_Load(reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); | 
| 173 |  | } | 
| 174 |  |  | 
| 175 |  | inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { | 
| 176 |  |     return base::subtle::Release_Load(reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); | 
| 177 |  | } | 
| 178 |  |  | 
| 179 |  | } // namespace subtle | 
| 180 |  | } // namespace base | 
| 181 |  | #endif // AtomicWordCastType | 
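Tying the wrappers above back to the rule in the header comment: never read or write an atomic variable with plain assignment; go through the Load/Store routines even when no ordering is needed. A minimal sketch (assuming the `AtomicWord` overloads shown above are in scope):

```cpp
static AtomicWord g_event_count = 0;

void RecordEvent() {
    // Wrong on some platforms: ++g_event_count (plain read-modify-write).
    base::subtle::NoBarrier_AtomicIncrement(&g_event_count, 1);
}

AtomicWord ReadEventCount() {
    // Wrong: return g_event_count (plain load). NoBarrier_Load is enough
    // when no ordering with other data is required, e.g. an approximate
    // statistics read.
    return base::subtle::NoBarrier_Load(&g_event_count);
}
```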
| 182 |  |  | 
| 183 |  | // ------------------------------------------------------------------------ | 
| 184 |  | // Commented out type definitions and method declarations for documentation | 
| 185 |  | // of the interface provided by this module. | 
| 186 |  | // ------------------------------------------------------------------------ | 
| 187 |  |  | 
| 188 |  | // ------------------------------------------------------------------------ | 
| 189 |  | // The following are to be deprecated when all uses have been changed to | 
| 190 |  | // use the base::subtle namespace. | 
| 191 |  | // ------------------------------------------------------------------------ | 
| 192 |  |  | 
| 193 |  | #ifdef AtomicWordCastType | 
| 194 |  | // AtomicWord versions to be deprecated | 
| 195 |  | inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, | 
| 196 |  |                                          AtomicWord new_value) { | 
| 197 |  |     return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value); | 
| 198 |  | } | 
| 199 |  |  | 
| 200 |  | inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, | 
| 201 |  |                                          AtomicWord new_value) { | 
| 202 |  |     return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); | 
| 203 |  | } | 
| 204 |  |  | 
| 205 |  | inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { | 
| 206 |  |     return base::subtle::Acquire_Store(ptr, value); | 
| 207 |  | } | 
| 208 |  |  | 
| 209 |  | inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { | 
| 210 |  |     return base::subtle::Release_Store(ptr, value); | 
| 211 |  | } | 
| 212 |  |  | 
| 213 |  | inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { | 
| 214 |  |     return base::subtle::Acquire_Load(ptr); | 
| 215 |  | } | 
| 216 |  |  | 
| 217 |  | inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { | 
| 218 |  |     return base::subtle::Release_Load(ptr); | 
| 219 |  | } | 
| 220 |  | #endif // AtomicWordCastType | 
| 221 |  |  | 
| 222 |  | // 32-bit Acquire/Release operations to be deprecated. | 
| 223 |  |  | 
| 224 |  | inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, | 
| 225 | 0 |                                        Atomic32 new_value) { | 
| 226 | 0 |     return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value); | 
| 227 | 0 | } | 
| 228 |  | inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, | 
| 229 | 0 |                                        Atomic32 new_value) { | 
| 230 | 0 |     return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); | 
| 231 | 0 | } | 
| 232 | 0 | inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 
| 233 | 0 |     base::subtle::Acquire_Store(ptr, value); | 
| 234 | 0 | } | 
| 235 | 0 | inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 
| 236 | 0 |     return base::subtle::Release_Store(ptr, value); | 
| 237 | 0 | } | 
| 238 | 0 | inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 
| 239 | 0 |     return base::subtle::Acquire_Load(ptr); | 
| 240 | 0 | } | 
| 241 | 0 | inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 
| 242 | 0 |     return base::subtle::Release_Load(ptr); | 
| 243 | 0 | } | 
| 244 |  |  | 
| 245 |  | // 64-bit Acquire/Release operations to be deprecated. | 
| 246 |  |  | 
| 247 |  | inline base::subtle::Atomic64 Acquire_CompareAndSwap(volatile base::subtle::Atomic64* ptr, | 
| 248 |  |                                                      base::subtle::Atomic64 old_value, | 
| 249 | 0 |                                                      base::subtle::Atomic64 new_value) { | 
| 250 | 0 |     return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value); | 
| 251 | 0 | } | 
| 252 |  | inline base::subtle::Atomic64 Release_CompareAndSwap(volatile base::subtle::Atomic64* ptr, | 
| 253 |  |                                                      base::subtle::Atomic64 old_value, | 
| 254 | 0 |                                                      base::subtle::Atomic64 new_value) { | 
| 255 | 0 |     return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); | 
| 256 | 0 | } | 
| 257 | 0 | inline void Acquire_Store(volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) { | 
| 258 | 0 |     base::subtle::Acquire_Store(ptr, value); | 
| 259 | 0 | } | 
| 260 | 26.9k | inline void Release_Store(volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) { | 
| 261 | 26.9k |     return base::subtle::Release_Store(ptr, value); | 
| 262 | 26.9k | } | 
| 263 | 38 | inline base::subtle::Atomic64 Acquire_Load(volatile const base::subtle::Atomic64* ptr) { | 
| 264 | 38 |     return base::subtle::Acquire_Load(ptr); | 
| 265 | 38 | } | 
| 266 | 0 | inline base::subtle::Atomic64 Release_Load(volatile const base::subtle::Atomic64* ptr) { | 
| 267 | 0 |     return base::subtle::Release_Load(ptr); | 
| 268 | 0 | } |