// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// IMPORTANT NOTE: deprecated. Use std::atomic instead.
//
// Rationale:
// - Uniformity: most of the code uses std::atomic, and the underlying
//   implementation is the same. Use the STL one.
// - Clearer code: return values from some operations (e.g. CompareAndSwap)
//   differ from the equivalent ones in std::atomic, leading to confusion
//   (see the sketch after this list).
// - Richer semantics: can use actual types, rather than e.g. Atomic32 for a
//   boolean flag, or AtomicWord for T*. Bitwise operations (e.g. fetch_or())
//   are only in std::atomic.
// - Harder to misuse: base::subtle::Atomic32 is just an int, making it
//   possible to accidentally manipulate it without realizing that there are
//   no atomic semantics attached to it. For instance, "Atomic32 a; a++;" is
//   almost certainly incorrect.

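// Illustrative comparison (a sketch only, not part of this header's API): the
// return-value difference mentioned above. The base::subtle routines return
// the previous value, while std::atomic's compare_exchange_strong() returns a
// bool and writes the observed value back into `expected`:
//
//   Atomic32 legacy = 0;
//   bool swapped_legacy = (Acquire_CompareAndSwap(&legacy, 0, 1) == 0);
//
//   std::atomic<int32_t> modern{0};
//   int32_t expected = 0;
//   bool swapped_modern = modern.compare_exchange_strong(expected, 1);
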
// For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_

#include <stdint.h>

// Small C++ header which defines implementation specific macros used to
// identify the STL implementation.
// - libc++: captures __config for _LIBCPP_VERSION
// - libstdc++: captures bits/c++config.h for __GLIBCXX__
#include <cstddef>

#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "build/build_config.h"

namespace base {
namespace subtle {

typedef int32_t Atomic32;
#ifdef ARCH_CPU_64_BITS
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

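// Example (an illustrative sketch, not a prescribed pattern): claiming a
// one-shot flag and detecting success via the returned previous value. Note
// that this variant implies no ordering of surrounding memory accesses, so it
// is only appropriate when none is needed:
//
//   Atomic32 claimed = 0;
//   ...
//   if (NoBarrier_CompareAndSwap(&claimed, 0, 1) == 0) {
//     // This call performed the 0 -> 1 transition.
//   }
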
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

// Like NoBarrier_AtomicIncrement(), but with "Barrier" (acquire and release)
// memory ordering semantics; see the description below.
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

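// Example (an illustrative sketch): a statistics counter. Because the
// increment routines return the updated value, the result can be used
// directly, e.g. to detect the first increment:
//
//   Atomic32 call_count = 0;
//   ...
//   if (NoBarrier_AtomicIncrement(&call_count, 1) == 1) {
//     // First caller.
//   }
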
// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. (See the illustrative sketch after the Load routines below.)
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);

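// Illustrative sketch (not a prescribed pattern) of the acquire/release
// publication idiom described above:
//
//   Atomic32 data = 0;
//   Atomic32 ready = 0;
//
//   // Producer thread:
//   NoBarrier_Store(&data, 42);
//   Release_Store(&ready, 1);  // No earlier access may move below this.
//
//   // Consumer thread:
//   if (Acquire_Load(&ready) == 1) {  // No later access may move above this.
//     Atomic32 v = NoBarrier_Load(&data);  // Guaranteed to observe 42.
//   }
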
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef ARCH_CPU_64_BITS
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
#endif  // ARCH_CPU_64_BITS

// Copies non-overlapping spans of the same size. Writes are done using C++
// atomics with `std::memory_order_relaxed`.
//
// This is an analogue of `WTF::AtomicWriteMemcpy` and should be used for
// copying data into buffers that are accessible from another thread while the
// copy is being done. The buffer will appear inconsistent, but it won't
// trigger C++ UB and won't upset TSAN. The end of the copy needs to be
// signaled through a synchronization mechanism such as a fence, after which
// the `dst` buffer will be observed as consistent.
//
// A notable example is a buffer owned by `SharedArrayBuffer`.
// While the copy is being done, JS and WASM code can access the `dst` buffer
// on a different thread. The data observed by JS may not be consistent from
// the application's point of view (which is always the case with
// `SharedArrayBuffer`).
//
// Reads from the `src` buffer are not atomic, so `src` access
// should be synchronized via other means.
// More info: crbug.com/340606792
BASE_EXPORT void RelaxedAtomicWriteMemcpy(base::span<uint8_t> dst,
                                          base::span<const uint8_t> src);

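// Illustrative sketch (the function and buffer names here are hypothetical):
//
//   void CopyIntoSharedBuffer(base::span<uint8_t> shared_dst,
//                             base::span<const uint8_t> local_src) {
//     // `shared_dst` may be read concurrently by another thread; it will
//     // appear inconsistent until the copy completes.
//     RelaxedAtomicWriteMemcpy(shared_dst, local_src);
//     // Publish completion through a separate synchronization mechanism
//     // (e.g. a fence or a posted message) before readers rely on
//     // `shared_dst` being consistent.
//   }
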
}  // namespace subtle
}  // namespace base

#include "base/atomicops_internals_portable.h"

// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_OPENBSD)
#include "base/atomicops_internals_atomicword_compat.h"
#endif

#endif  // BASE_ATOMICOPS_H_