// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_

#include <stdint.h>

#include <atomic>
#include <memory>
#include <string_view>
#include <type_traits>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/check.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/shared_memory_mapping.h"
#include "build/build_config.h"

namespace metrics {
class FileMetricsProvider;
}

namespace base {

class HistogramBase;
class MemoryMappedFile;

// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
// under base/metrics because it was written for that purpose. It is,
// however, fully general-purpose and can be freely moved to base/memory
// if other uses are found.
//
// This class provides for thread-secure (i.e. safe against other threads
// or processes that may be compromised and thus have malicious intent)
// allocation of memory within a designated block and also a mechanism by
// which other threads can learn of these allocations.
//
// There is (currently) no way to release an allocated block of data because
// doing so would risk invalidating pointers held by other processes and
// greatly complicate the allocation algorithm.
//
// Construction of this object can accept new, clean (i.e. zeroed) memory
// or previously initialized memory. In the first case, construction must
// be allowed to complete before letting other allocators attach to the same
// segment. In other words, don't share the segment until at least one
// allocator has been attached to it.
//
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
//
// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
// character arrays and manipulating that memory manually, the better way is
// generally to use the "object" methods to create and manage allocations. In
// this way the sizing, type-checking, and construction are all automatic. For
// this to work, however, every type of stored object must define two public
// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
//
//   struct MyPersistentObjectType {
//       // SHA1(MyPersistentObjectType): Increment this if structure changes!
//       static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
//
//       // Expected size for 32/64-bit check. Update this if structure changes!
//       static constexpr size_t kExpectedInstanceSize = 20;
//
//       ...
//   };
//
// kPersistentTypeId: This value is an arbitrary identifier that allows the
//   identification of these objects in the allocator, including the ability
//   to find them via iteration. The number is arbitrary but using the first
//   four bytes of the SHA1 hash of the type name means that there shouldn't
//   be any conflicts with other types that may also be stored in the memory.
//   The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
//   be used to generate the hash if the type name seems common. Use a command
//   like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
//   If the structure layout changes, ALWAYS increment this number so that
//   newer versions of the code don't try to interpret persistent data written
//   by older versions with a different layout.
//
// kExpectedInstanceSize: This value is the hard-coded number that matches
//   what sizeof(T) would return. By providing it explicitly, the allocator can
//   verify that the structure is compatible between both 32-bit and 64-bit
//   versions of the code.
//
// Using New manages the memory and then calls the default constructor for the
// object. Given that objects are persistent, no destructor is ever called
// automatically though a caller can explicitly call Delete to destruct it and
// change the type to something indicating it is no longer in use.
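//
// As an illustrative sketch only (the allocator pointer is assumed to come
// from elsewhere; MyPersistentObjectType is the example type above):
//
//   PersistentMemoryAllocator* allocator = ...;
//   MyPersistentObjectType* obj = allocator->New<MyPersistentObjectType>();
//   if (obj) {
//     // ... use the object ...
//     allocator->Delete(obj);  // Destructs and re-types the block; the
//                              // memory itself remains allocated.
//   }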
//
// Though persistent memory segments are transferable between programs built
// for different natural word widths, they CANNOT be exchanged between CPUs
// of different endianness. Attempts to do so will simply see the existing
// data as corrupt and refuse to access any of it.
class BASE_EXPORT PersistentMemoryAllocator {
 public:
  typedef uint32_t Reference;

  // All allocations and data-structures must be aligned to this byte boundary.
  // Alignment as large as the physical bus between CPU and RAM is _required_
  // for some architectures, is simply more efficient on other CPUs, and
  // generally a Good Idea(tm) for all platforms as it reduces/eliminates the
  // chance that a type will span cache lines. Alignment mustn't be less
  // than 8 to ensure proper alignment for all types. The rest is a balance
  // between reducing spans across multiple cache lines and wasted space spent
  // padding out allocations. An alignment of 16 would ensure that the block
  // header structure always sits in a single cache line. An average of about
  // 1/2 this value will be wasted with every allocation.
  static constexpr size_t kAllocAlignment = 8;

  // These states are used to indicate the overall condition of the memory
  // segment irrespective of what is stored within it. Because the data is
  // often persistent and thus needs to be readable by different versions of
  // a program, these values are fixed and can never change.
  enum MemoryState : uint8_t {
    // Persistent memory starts all zeros and so shows "uninitialized".
    MEMORY_UNINITIALIZED = 0,

    // The header has been written and the memory is ready for use.
    MEMORY_INITIALIZED = 1,

    // The data should be considered deleted. This would be set when the
    // allocator is being cleaned up. If file-backed, the file is likely
    // to be deleted but since deletion can fail for a variety of reasons,
    // having this extra status means a future reader can realize what
    // should have happened.
    MEMORY_DELETED = 2,

    // The data should be considered complete. This is usually set when the
    // browser is going to exit to indicate that it terminated cleanly and
    // that the memory should be well-formed. In theory, this is not perfect
    // as it is possible for the browser/device to crash after this has been
    // set, but in practice this should be a reasonable indication as to
    // whether the data comes from a completed or crashed session (if
    // file-backed). Note that this might not be set on certain platforms
    // (e.g. Android, iOS) due to not having a guaranteed clean shutdown path.
    MEMORY_COMPLETED = 3,

    // Outside code can create states starting with this number; these too
    // must also never change between code versions.
    MEMORY_USER_DEFINED = 100,
  };

  // Iterator for going through all iterable memory records in an allocator.
  // Like the allocator itself, iterators are lock-free and thread-secure.
  // That means that multiple threads can share an iterator and the same
  // reference will not be returned twice.
  //
  // The order of the items returned by an iterator matches the order in which
  // MakeIterable() was called on them. Once an allocation is made iterable,
  // it remains so; the only possible difference between successive
  // iterations is that more items may have been added to the end.
  //
  // Iteration, in general, is tolerant of corrupted memory. It will return
  // what it can and stop only when corruption forces it to. Bad corruption
  // could cause the same object to be returned many times but it will
  // eventually quit.
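  //
  // A non-authoritative sketch of scanning for one type of object (kMyType
  // and MyPersistentObjectType are illustrative caller-defined names):
  //
  //   PersistentMemoryAllocator::Iterator iter(allocator);
  //   uint32_t type;
  //   for (Reference ref = iter.GetNext(&type); ref;
  //        ref = iter.GetNext(&type)) {
  //     if (type == kMyType) {
  //       const MyPersistentObjectType* obj =
  //           iter.GetAsObject<MyPersistentObjectType>(ref);
  //       if (obj) {
  //         // ... read fields of *obj ...
  //       }
  //     }
  //   }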
  class BASE_EXPORT Iterator {
   public:
    // Constructs an iterator on a given `allocator`, starting at the
    // beginning. The allocator must live beyond the lifetime of the iterator.
    // This class has read-only access to the allocator (hence "const") but
    // the returned references can be used on a read/write version, too.
    explicit Iterator(const PersistentMemoryAllocator* allocator);

    // As above but resuming from the `starting_after` reference. The first
    // call to GetNext() will return the next object found after that
    // reference. The reference must be to an "iterable" object; references to
    // non-iterable objects (those that never had MakeIterable() called for
    // them) will cause a run-time error.
    Iterator(const PersistentMemoryAllocator* allocator,
             Reference starting_after);

    Iterator(const Iterator&) = delete;
    Iterator& operator=(const Iterator&) = delete;

    ~Iterator();

    // Resets the iterator back to the beginning.
    void Reset();

    // Resets the iterator, resuming from the `starting_after` reference.
    void Reset(Reference starting_after);

    // Returns the previously retrieved reference, or kReferenceNull if none.
    // If constructed or reset with a `starting_after` location, this will
    // return that value.
    Reference GetLast();

    // Gets the next iterable, storing that type in `type_return`. The actual
    // return value is a reference to the allocation inside the allocator or
    // zero if there are no more. GetNext() may still be called again at a
    // later time to retrieve any new allocations that have been added.
    Reference GetNext(uint32_t* type_return, size_t* alloc_size = nullptr);

    // Similar to above but gets the next iterable of a specific `type_match`.
    // This should not be mixed with calls to GetNext() because any allocations
    // skipped here due to a type mismatch will never be returned by later
    // calls to GetNext(), meaning it's possible to completely miss entries.
    Reference GetNextOfType(uint32_t type_match, size_t* alloc_size = nullptr);

    // As above but works using object type.
    template <typename T>
    Reference GetNextOfType() {
      return GetNextOfType(T::kPersistentTypeId);
    }

    // As above but works using objects and returns null if not found.
    template <typename T>
    const T* GetNextOfObject() {
      return GetAsObject<T>(GetNextOfType<T>());
    }

    // Converts references to objects. This is a convenience method so that
    // users of the iterator don't need to also have their own pointer to the
    // allocator over which the iterator runs in order to retrieve objects.
    // Because the iterator is not read/write, only "const" objects can be
    // fetched. Non-const objects can be fetched using the reference on a
    // non-const (external) pointer to the same allocator (or use const_cast
    // to remove the qualifier).
    template <typename T>
    const T* GetAsObject(Reference ref) const {
      return allocator_->GetAsObject<T>(ref);
    }

    // Similar to GetAsObject() but converts references to arrays of things.
    template <typename T>
    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
      return allocator_->GetAsArray<T>(ref, type_id, count);
    }

    // Convert a generic pointer back into a reference. A null reference will
    // be returned if `memory` is not inside the persistent segment or does
    // not point to an object of the specified `type_id`.
    Reference GetAsReference(const void* memory, uint32_t type_id) const {
      return allocator_->GetAsReference(memory, type_id);
    }

    // As above but convert an object back into a reference.
    template <typename T>
    Reference GetAsReference(const T* obj) const {
      return allocator_->GetAsReference(obj);
    }

   private:
    // Weak-pointer to memory allocator being iterated over.
    raw_ptr<const PersistentMemoryAllocator> allocator_;

    // The last record that was returned.
    std::atomic<Reference> last_record_;

    // The number of records found; used for detecting loops.
    std::atomic<uint32_t> record_count_;
  };

  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;
    size_t free;
  };

  enum : Reference {
    // A common "null" reference value.
    kReferenceNull = 0,
  };

  enum : uint32_t {
    // A value that will match any type when doing lookups.
    kTypeIdAny = 0x00000000,

    // A value indicating that the type is in transition. Work is being done
    // on the contents to prepare it for a new type to come.
    kTypeIdTransitioning = 0xFFFFFFFF,
  };

  enum : size_t {
    kSizeAny = 1  // Constant indicating that any array size is acceptable.
  };

  // Indicates the mode for accessing the underlying data.
  enum AccessMode {
    kReadOnly,
    kReadWrite,
    // Open existing initialized data in R/W mode. If the passed data appears
    // to not have been initialized, does not write to it and instead marks
    // the allocator as corrupt (without writing anything to the underlying
    // data.)
    kReadWriteExisting,
  };

  // This is the standard file extension (suitable for being passed to the
  // AddExtension() method of base::FilePath) for dumps of persistent memory.
  static const base::FilePath::CharType kFileExtension[];

  // The allocator operates on any arbitrary block of memory. Creation and
  // persisting or sharing of that block with another process is the
  // responsibility of the caller. The allocator needs to know only the
  // block's `base` address, the total `size` of the block, and any internal
  // `page` size (zero if not paged) across which allocations should not span.
  // The `id` is an arbitrary value the caller can use to identify a
  // particular memory segment. It will only be loaded during the initial
  // creation of the segment and can be checked by the caller for consistency.
  // The `name`, if provided, is used to distinguish histograms for this
  // allocator. Only the primary owner of the segment should define this value;
  // other processes can learn it from the shared state. If the access mode
  // is kReadOnly then no changes will be made to it. The resulting object
  // should be stored as a "const" pointer.
  //
  // PersistentMemoryAllocator does NOT take ownership of the memory block.
  // The caller must manage it and ensure it stays available throughout the
  // lifetime of this object.
  //
  // Memory segments for sharing must have had an allocator attached to them
  // before actually being shared. If the memory segment was just created, it
  // should be zeroed before being passed here. If it was an existing segment,
  // the values here will be compared to copies stored in the shared segment
  // as a guard against corruption.
  //
  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
  // method below) before construction if the definition of the segment can
  // vary in any way at run-time. Invalid memory segments will cause a crash.
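  //
  // A construction sketch, assuming `base` points at a zeroed 1 MiB segment
  // owned by the caller (the id and name here are purely illustrative):
  //
  //   PersistentMemoryAllocator allocator(
  //       base, /*size=*/1 << 20, /*page_size=*/0, /*id=*/0x1234,
  //       /*name=*/"ExampleAllocator", kReadWrite);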
  PersistentMemoryAllocator(void* base,
                            size_t size,
                            size_t page_size,
                            uint64_t id,
                            std::string_view name,
                            AccessMode access_mode);

  PersistentMemoryAllocator(const PersistentMemoryAllocator&) = delete;
  PersistentMemoryAllocator& operator=(const PersistentMemoryAllocator&) =
      delete;

  virtual ~PersistentMemoryAllocator();

  // Checks if a memory segment is acceptable for creation of an Allocator.
  // This doesn't do any analysis of the data and so doesn't guarantee that
  // the contents are valid, just that the parameters won't cause the program
  // to abort. The IsCorrupt() method will report detection of data problems
  // found during construction and general operation.
  static bool IsMemoryAcceptable(const void* data,
                                 size_t size,
                                 size_t page_size,
                                 bool readonly);

  // Returns the internal identifier for this persistent memory segment.
  uint64_t Id() const;

  // Returns the internal name of this allocator (possibly an empty string).
  // The returned string_view references a bounded span within the shared
  // memory region. As such, it should be treated as a volatile but bounded
  // block of memory. In particular, clients should respect the 'length()' of
  // the returned view instead of relying on a terminating NUL char.
  std::string_view Name() const;

  // Is this segment open only for read?
  bool IsReadonly() const { return access_mode_ == kReadOnly; }

  // Manage the saved state of the memory.
  void SetMemoryState(uint8_t memory_state);
  uint8_t GetMemoryState() const;

  // Creates internal histograms for tracking memory use and allocation sizes
  // for allocator of `name` (which can simply be the result of Name()). This
  // is done separately from construction for situations such as when the
  // histograms will be backed by memory provided by this very allocator.
  //
  // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must
  // be updated with the following histograms for each `name` param:
  //    UMA.PersistentAllocator.name.Errors
  //    UMA.PersistentAllocator.name.UsedPct
  void CreateTrackingHistograms(std::string_view name);

  // Flushes the persistent memory to any backing store. This typically does
  // nothing but is used by the FilePersistentMemoryAllocator to inform the
  // OS that all the data should be sent to the disk immediately. This is
  // useful in the rare case where something has just been stored that needs
  // to survive a hard shutdown of the machine like from a power failure.
  // The `sync` parameter indicates if this call should block until the flush
  // is complete but is only advisory and may or may not have an effect
  // depending on the capabilities of the OS. Synchronous flushes are allowed
  // only from threads that are allowed to do I/O but since `sync` is only
  // advisory, all flushes should be done on IO-capable threads.
  // TODO: Since `sync` is ignored on Windows, consider making it re-post on a
  // background thread with `sync` set to true so that `sync` is not just
  // advisory.
  void Flush(bool sync);

  // Direct access to underlying memory segment. If the segment is shared
  // across threads or processes, reading data through these values does
  // not guarantee consistency. Use with care. Do not write.
  const void* data() const { return const_cast<const char*>(mem_base_); }
  size_t length() const { return mem_size_; }
  size_t size() const { return mem_size_; }
  size_t page_size() const { return mem_page_; }
  size_t used() const;

  // Returns the object referenced by `ref`. For safety reasons, the `type_id`
  // code and sizeof(`T`) are compared to ensure the reference is valid
  // and cannot return an object outside of the memory segment. A `type_id` of
  // kTypeIdAny (zero) will match any type though the size is still checked.
  // NULL is returned if any problem is detected, such as corrupted storage or
  // incorrect parameters. Callers MUST check that the returned value is
  // non-null EVERY TIME before accessing it or risk crashing! Once
  // dereferenced, the pointer is safe to reuse forever.
  //
  // It is essential that the object be of a fixed size. All fields must be of
  // a defined type that does not change based on the compiler or the CPU
  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
  // Unacceptable are int, bool, and wchar_t, which are implementation-defined
  // with regard to their size.
  //
  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
  // differently between 32 and 64 bit architectures. Either put the bigger
  // elements first, group smaller elements into blocks the size of larger
  // elements, or manually insert padding fields as appropriate for the
  // largest architecture, including at the end.
  //
  // To protect against mistakes, all objects must have the attribute
  // `kExpectedInstanceSize` (static constexpr size_t) that is a hard-coded
  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
  // instance size is not fixed, at least one build will fail.
  //
  // If the size of a structure changes, the type-ID used to recognize it
  // should also change so later versions of the code don't try to read
  // incompatible structures from earlier versions.
  //
  // NOTE: Though this method will guarantee that an object of the specified
  // type can be accessed without going outside the bounds of the memory
  // segment, it makes no guarantees of the validity of the data within the
  // object itself. If it is expected that the contents of the segment could
  // be compromised with malicious intent, the object must be hardened as well.
  //
  // Though the persistent data may be "volatile" if it is shared with
  // other processes, such is not necessarily the case. The internal
  // "volatile" designation is discarded so as to not propagate the viral
  // nature of that keyword to the caller. It can add it back, if necessary,
  // based on knowledge of how the allocator is being used.
  template <typename T>
  T* GetAsObject(Reference ref, size_t* alloc_size = nullptr) {
    static_assert(std::is_standard_layout_v<T>, "only standard objects");
    static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T), alloc_size)));
  }
  template <typename T>
  const T* GetAsObject(Reference ref, size_t* alloc_size = nullptr) const {
    static_assert(std::is_standard_layout_v<T>, "only standard objects");
    static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T), alloc_size)));
  }

  // Like GetAsObject() but returns an array of simple, fixed-size types.
  //
  // Use a `count` of the required number of array elements, or kSizeAny.
  // The optionally returned `alloc_size` can be used to calculate the upper
  // bound but isn't reliable because padding can make space for extra
  // elements that were not written.
  //
  // Remember that an array of char is a string but may not be NUL terminated.
  //
  // There are no compile-time or run-time checks to ensure 32/64-bit size
  // compatibility when using these accessors. Only use fixed-size types such
  // as char, float, double, or (u)intXX_t.
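  //
  // A usage sketch (kMyDataType is an illustrative caller-defined type-id):
  //
  //   Reference ref =
  //       allocator->Allocate(100 * sizeof(uint32_t), kMyDataType);
  //   uint32_t* values =
  //       allocator->GetAsArray<uint32_t>(ref, kMyDataType, 100);
  //   if (values) {
  //     values[0] = 42;
  //   }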
  template <typename T>
  T* GetAsArray(Reference ref,
                uint32_t type_id,
                size_t count,
                size_t* alloc_size = nullptr) {
    static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T), alloc_size)));
  }
  template <typename T>
  const T* GetAsArray(Reference ref,
                      uint32_t type_id,
                      size_t count,
                      size_t* alloc_size = nullptr) const {
    static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T), alloc_size)));
  }
| 494 | |
Roger McFarlane | 43ef168 | 2025-01-31 18:30:37 | [diff] [blame] | 495 | // Gets the corresponding reference for an object held in persistent memory. |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 496 | // If the `memory` is not valid or the type does not match, a kReferenceNull |
bcwhite | 1010504 | 2016-11-29 00:09:55 | [diff] [blame] | 497 | // result will be returned. |
| 498 | Reference GetAsReference(const void* memory, uint32_t type_id) const; |
| 499 | |
Roger McFarlane | 43ef168 | 2025-01-31 18:30:37 | [diff] [blame] | 500 | // Accesses the internal "type" of an object. This generally isn't necessary |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 501 | // but can be used to "clear" the type and so effectively mark it as deleted |
bcwhite | 84a8e09 | 2016-06-03 15:19:31 | [diff] [blame] | 502 | // even though the memory stays valid and allocated. Changing the type is |
| 503 | // an atomic compare/exchange and so requires knowing the existing value. |
| 504 | // It will return false if the existing type is not what is expected. |
bcwhite | cf6a9e8 | 2017-02-09 20:44:23 | [diff] [blame] | 505 | // |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame] | 506 | // Changing the type doesn't mean the data is compatible with the new type. |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 507 | // Passing true for `clear` will zero the memory after the type has been |
| 508 | // changed away from `from_type_id` but before it becomes `to_type_id` meaning |
bcwhite | bee49a2 | 2017-03-10 18:58:48 | [diff] [blame] | 509 | // that it is done in a manner that is thread-safe. Memory is guaranteed to |
| 510 | // be zeroed atomically by machine-word in a monotonically increasing order. |
bcwhite | cf6a9e8 | 2017-02-09 20:44:23 | [diff] [blame] | 511 | // |
| 512 | // It will likely be necessary to reconstruct the type before it can be used. |
| 513 | // Changing the type WILL NOT invalidate existing pointers to the data, either |
| 514 | // in this process or others, so changing the data structure could have |
| 515 | // unpredicatable results. USE WITH CARE! |
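  //
  // A sketch of retiring an allocation (the type-ids are illustrative
  // caller-defined constants):
  //
  //   if (allocator->ChangeType(ref, /*to_type_id=*/kMyTypeRetired,
  //                             /*from_type_id=*/kMyType, /*clear=*/true)) {
  //     // The block is now typed kMyTypeRetired and its data was zeroed.
  //   }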
  uint32_t GetType(Reference ref) const;
  bool ChangeType(Reference ref,
                  uint32_t to_type_id,
                  uint32_t from_type_id,
                  bool clear);

  // Allocated objects can be added to an internal list that can then be
  // iterated over by other processes. If an allocated object can be found
  // another way, such as by having its reference within a different object
  // that will be made iterable, then this call is not necessary. This always
  // succeeds unless corruption is detected; check IsCorrupt() to find out.
  // Once an object is made iterable, its position in iteration can never
  // change; new iterable objects will always be added after it in the series.
  // Changing the type does not alter its "iterable" status.
  void MakeIterable(Reference ref);

  // Gets the information about the amount of free space in the allocator. The
  // amount of free space should be treated as approximate due to extras from
  // alignment and metadata. Concurrent allocations from other threads will
  // also make the true amount less than what is reported.
  void GetMemoryInfo(MemoryInfo* meminfo) const;

  // If there is some indication that the memory has become corrupted,
  // calling this will attempt to prevent further damage by indicating to
  // all processes that something is not as expected.
  // If `allow_write` is false, the corrupt bit will not be written to the
  // data.
  void SetCorrupt(bool allow_write = true) const;

  // This can be called to determine if corruption has been detected in the
  // segment, possibly by a malicious actor. Once detected, future allocations
  // will fail and iteration may not locate all objects.
  bool IsCorrupt() const;

  // Flag set if an allocation has failed because the memory segment was full.
  bool IsFull() const;

  // Update those "tracking" histograms which do not get updates during
  // regular operation, such as how much memory is currently used. This should
  // be called before such information is to be displayed or uploaded.
  void UpdateTrackingHistograms();

  // While the above works much like malloc & free, these next methods provide
  // an "object" interface similar to new and delete.

  // Reserves space in the memory segment of the desired `size` and `type_id`.
  //
  // A return value of zero indicates the allocation failed, otherwise the
  // returned reference can be used by any process to get a real pointer via
  // the GetAsObject() or GetAsArray() calls. The actual allocated size may be
  // larger and will always be a multiple of 8 bytes (64 bits).
  Reference Allocate(size_t size,
                     uint32_t type_id,
                     size_t* alloc_size = nullptr);

  // Allocates and constructs an object in persistent memory. The type must
  // have both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
  // static constexpr fields that are used to ensure compatibility between
  // software versions. An optional size parameter can be specified to force
  // the allocation to be bigger than the size of the object; this is useful
  // when the last field is actually variable length.
  template <typename T>
  T* New(size_t size) {
    static_assert(alignof(T) <= kAllocAlignment);
    if (size < sizeof(T)) {
      size = sizeof(T);
    }
    Reference ref = Allocate(size, T::kPersistentTypeId);
    void* mem =
        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
    if (!mem) {
      return nullptr;
    }
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    return new (mem) T();
  }
  template <typename T>
  T* New() {
    return New<T>(sizeof(T));
  }
  // Similar to New, above, but constructs the object out of an existing
  // memory block of an expected type. If `clear` is true, memory will be
  // zeroed before construction. Though this is not standard object behavior,
  // it is present to match with new allocations that always come from zeroed
  // memory. Anything previously present simply ceases to exist; no destructor
  // is called for it so explicitly Delete() the old object first if need be.
  // Calling this will not invalidate existing pointers to the object, either
  // in this process or others, so changing the object could have
  // unpredictable results. USE WITH CARE!
  template <typename T>
  T* New(Reference ref, uint32_t from_type_id, bool clear) {
    // Make sure the memory is appropriate. This won't be used until after
    // the type is changed but checking first avoids the possibility of having
    // to change the type back.
    size_t alloc_size = 0;
    void* mem =
        const_cast<void*>(GetBlockData(ref, 0, sizeof(T), &alloc_size));
    if (!mem) {
      return nullptr;
    }

    DCHECK_LE(sizeof(T), alloc_size) << "alloc not big enough for obj";

    // Ensure the allocator's internal alignment is sufficient for this object.
    // This protects against coding errors in the allocator.
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    // Change the type, clearing the memory if so desired. The new type is
    // "transitioning" so that there is no race condition with the construction
    // of the object should another thread be simultaneously iterating over
    // data. This will "acquire" the memory so no changes get reordered before
    // it.
    if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear)) {
      return nullptr;
    }
    // Construct an object of the desired type on this memory, just as if
    // New() had been called to create it.
    T* obj = new (mem) T();
    // Finally change the type to the desired one. This will "release" all of
    // the changes above and so provide a consistent view to other threads.
    bool success =
        ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
    DCHECK(success);
    return obj;
  }

  // Deletes an object by destructing it and then changing the type to a
  // different value (default 0).
  template <typename T>
  void Delete(T* obj, uint32_t new_type) {
    // Get the reference for the object.
    Reference ref = GetAsReference<T>(obj);
    // First change the type to "transitioning" so there is no race condition
    // where another thread could find the object through iteration while it
    // is being destructed. This will "acquire" the memory so no changes get
    // reordered before it. It will fail if `ref` is invalid.
    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false)) {
      return;
    }
    // Destruct the object.
    obj->~T();
    // Finally change the type to the desired value. This will "release" all
    // the changes above.
    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
    DCHECK(success);
  }
  template <typename T>
  void Delete(T* obj) {
    Delete<T>(obj, 0);
  }

  // As above but works with objects allocated from persistent memory.
  template <typename T>
  Reference GetAsReference(const T* obj) const {
    return GetAsReference(obj, T::kPersistentTypeId);
  }

  // As above but works with an object allocated from persistent memory.
  template <typename T>
  void MakeIterable(const T* obj) {
    MakeIterable(GetAsReference<T>(obj));
  }

  // Returns a string_view of a c-style string that is located at the end of
  // an allocated memory block. It is the caller's responsibility to
  // know/ensure that `object` is of some type that ends with a c-style string
  // and that said string begins at `offset` bytes from `object`. `alloc_size`
  // must be the size of the allocation, as returned by the allocator. If
  // `object` is `nullptr` or `offset >= alloc_size` then an empty string_view
  // is returned. Users should treat the returned view as a volatile bounded
  // memory region; it references the underlying shared memory, whose contents
  // can be changed or corrupted at any time. In particular, clients should
  // respect the 'length()' of the returned view instead of relying on a
  // terminating NUL char.
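  //
  // A reading sketch (MyHeader is an illustrative type whose trailing bytes
  // hold the string):
  //
  //   size_t alloc_size = 0;
  //   const MyHeader* hdr =
  //       allocator->GetAsObject<MyHeader>(ref, &alloc_size);
  //   std::string_view name =
  //       StringViewAt(hdr, sizeof(MyHeader), alloc_size);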
| 688 | static std::string_view StringViewAt(const void* object, |
| 689 | size_t offset, |
| 690 | size_t alloc_size); |
| 691 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 692 | protected: |
bcwhite | cd4923d | 2016-09-23 18:30:03 | [diff] [blame] | 693 | enum MemoryType { |
| 694 | MEM_EXTERNAL, |
| 695 | MEM_MALLOC, |
| 696 | MEM_VIRTUAL, |
| 697 | MEM_SHARED, |
| 698 | MEM_FILE, |
| 699 | }; |
| 700 | |
| 701 | struct Memory { |
| 702 | Memory(void* b, MemoryType t) : base(b), type(t) {} |
| 703 | |
Keishi Hattori | e175ac5 | 2022-06-07 06:24:57 | [diff] [blame] | 704 | raw_ptr<void> base; |
bcwhite | cd4923d | 2016-09-23 18:30:03 | [diff] [blame] | 705 | MemoryType type; |
| 706 | }; |
| 707 | |
| 708 | // Constructs the allocator. Everything is the same as the public allocator |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 709 | // except `memory` which is a structure with additional information besides |
bcwhite | cd4923d | 2016-09-23 18:30:03 | [diff] [blame] | 710 | // the base address. |
Alexei Svitkine | 50fa812 | 2023-08-29 21:01:58 | [diff] [blame] | 711 | PersistentMemoryAllocator(Memory memory, |
| 712 | size_t size, |
| 713 | size_t page_size, |
| 714 | uint64_t id, |
Helmut Januschka | 274c380 | 2024-03-12 23:31:29 | [diff] [blame] | 715 | std::string_view name, |
Alexei Svitkine | 50fa812 | 2023-08-29 21:01:58 | [diff] [blame] | 716 | AccessMode access_mode); |
bcwhite | cd4923d | 2016-09-23 18:30:03 | [diff] [blame] | 717 | |
bcwhite | 42561dc | 2017-03-16 18:35:24 | [diff] [blame] | 718 | // Implementation of Flush that accepts how much to flush. |
| 719 | virtual void FlushPartial(size_t length, bool sync); |
| 720 | |
Bartek Nowierski | 424d8a3b | 2024-06-07 04:58:41 | [diff] [blame] | 721 | // RAW_PTR_EXCLUSION: Never allocated by PartitionAlloc (always mmap'ed), so |
| 722 | // there is no benefit to using a raw_ptr, only cost. |
Keishi Hattori | 130ae0a | 2023-03-27 03:48:47 | [diff] [blame] | 723 | RAW_PTR_EXCLUSION volatile char* const |
Peter Kasting | 134ef9af | 2024-12-28 02:30:09 | [diff] [blame] | 724 | mem_base_; // Memory base. (char so sizeof guaranteed 1) |
| 725 | const MemoryType mem_type_; // Type of memory allocation. |
| 726 | const uint32_t mem_size_; // Size of entire memory segment. |
| 727 | const uint32_t mem_page_; // Page size allocations shouldn't cross. |
| 728 | const size_t vm_page_size_; // The page size used by the OS. |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 729 | |

 private:
  struct SharedMetadata;
  struct BlockHeader;
  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code.
  const SharedMetadata* shared_meta() const {
    return reinterpret_cast<const SharedMetadata*>(
        const_cast<const char*>(mem_base_));
  }
  SharedMetadata* shared_meta() {
    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
  }

  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id, size_t* alloc_size);

  // Dereferences a block `ref` to retrieve a pointer to the block header for
  // the reference. This method ensures that the referenced block is valid for
  // the desired `type_id` and `size`. Optionally, if `alloc_size` is not
  // nullptr, the validated size of the underlying allocation is returned.
  //
  // Special cases for internal use only:
  //
  // * If `queue_ok` is true and `ref` is kReferenceQueue, then the block
  //   header for the allocation queue is returned.
  //
  // * If `free_ok` is true, then the block header is allowed to point to a
  //   block that may not be in the `allocated` state. This bypasses block
  //   validation.
  //
  // Because they bypass block validation, it is not permitted to request the
  // `alloc_size` when either of `queue_ok` or `free_ok` are true.
  const volatile BlockHeader* GetBlock(Reference ref,
                                       uint32_t type_id,
                                       size_t size,
                                       bool queue_ok,
                                       bool free_ok,
                                       size_t* alloc_size = nullptr) const;
  volatile BlockHeader* GetBlock(Reference ref,
                                 uint32_t type_id,
                                 size_t size,
                                 bool queue_ok,
                                 bool free_ok,
                                 size_t* alloc_size = nullptr) {
    return const_cast<volatile BlockHeader*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
            ref, type_id, size, queue_ok, free_ok, alloc_size));
  }

  // Gets the actual data within a block associated with a specific reference.
  const volatile void* GetBlockData(Reference ref,
                                    uint32_t type_id,
                                    size_t size,
                                    size_t* alloc_size = nullptr) const;
  volatile void* GetBlockData(Reference ref,
                              uint32_t type_id,
                              size_t size,
                              size_t* alloc_size = nullptr) {
    return const_cast<volatile void*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
            ref, type_id, size, alloc_size));
  }

  // Returns the offset to the first free space segment.
  uint32_t freeptr() const;

  // Returns the metadata version used in this allocator.
  uint32_t version() const;

  const AccessMode access_mode_;

  // Local version of "corrupted" flag.
  mutable std::atomic<bool> corrupt_ = false;

  // Histogram recording used space.
  raw_ptr<HistogramBase> used_histogram_ = nullptr;

  // TODO(crbug.com/40064026): Remove these. They are used to investigate
  // unexpected failures and code paths.
  friend class DelayedPersistentAllocation;
  friend class metrics::FileMetricsProvider;
  void DumpWithoutCrashing(Reference ref,
                           uint32_t expected_type,
                           size_t expected_size,
                           bool dump_block_header) const;

  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 820 | }; |
| 821 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 822 | // This allocator uses a local memory block that it allocates from the
| 823 | // general heap. It is generally used when some kind of "death rattle"
| 824 | // handler will save the contents to persistent storage during process
| 825 | // shutdown. It is also useful for testing.
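//
// A minimal usage sketch (the id, name, and type value are hypothetical):
//
//   base::LocalPersistentMemoryAllocator allocator(
//       /*size=*/64 << 10, /*id=*/0x1234, /*name=*/"ExampleAllocator");
//   base::PersistentMemoryAllocator::Reference ref =
//       allocator.Allocate(/*size=*/32, /*type_id=*/0xABCD);
//   // A zero reference means the allocation failed.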
| 826 | class BASE_EXPORT LocalPersistentMemoryAllocator |
| 827 | : public PersistentMemoryAllocator { |
| 828 | public: |
Helmut Januschka | 274c380 | 2024-03-12 23:31:29 | [diff] [blame] | 829 | LocalPersistentMemoryAllocator(size_t size, |
| 830 | uint64_t id, |
| 831 | std::string_view name); |
Peter Boström | 7319bbd | 2021-09-15 22:59:38 | [diff] [blame] | 832 | |
| 833 | LocalPersistentMemoryAllocator(const LocalPersistentMemoryAllocator&) = |
| 834 | delete; |
| 835 | LocalPersistentMemoryAllocator& operator=( |
| 836 | const LocalPersistentMemoryAllocator&) = delete; |
| 837 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 838 | ~LocalPersistentMemoryAllocator() override; |
| 839 | |
| 840 | private: |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 841 | // Allocates a block of local memory of the specified `size`, ensuring that |
bcwhite | eda1a9978 | 2016-06-02 19:27:17 | [diff] [blame] | 842 | // the memory will not be physically allocated until accessed and will read |
| 843 | // as zero when that happens. |
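//
// On POSIX systems this is typically achieved with an anonymous mapping,
// e.g. (a sketch only; the real implementation is platform-specific):
//
//   void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
//                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   // Anonymous pages read as zero and are committed only when touched.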
Helmut Januschka | 274c380 | 2024-03-12 23:31:29 | [diff] [blame] | 844 | static Memory AllocateLocalMemory(size_t size, std::string_view name); |
bcwhite | eda1a9978 | 2016-06-02 19:27:17 | [diff] [blame] | 845 | |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 846 | // Deallocates a block of local `memory` of the specified `size`. |
bcwhite | cd4923d | 2016-09-23 18:30:03 | [diff] [blame] | 847 | static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type); |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 848 | }; |
| 849 | |
Alexandr Ilin | 027ca3d3 | 2019-02-12 18:37:33 | [diff] [blame] | 850 | // This allocator takes a writable shared memory mapping object and performs |
| 851 | // allocation from it. The allocator takes ownership of the mapping object. |
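//
// A usage sketch (size, id, and name are hypothetical; error handling
// omitted):
//
//   base::WritableSharedMemoryRegion region =
//       base::WritableSharedMemoryRegion::Create(64 << 10);
//   base::WritableSharedMemoryMapping mapping = region.Map();
//   if (mapping.IsValid() &&
//       base::WritableSharedPersistentMemoryAllocator::
//           IsSharedMemoryAcceptable(mapping)) {
//     base::WritableSharedPersistentMemoryAllocator allocator(
//         std::move(mapping), /*id=*/0x1234, "ExampleShared");
//   }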
| 852 | class BASE_EXPORT WritableSharedPersistentMemoryAllocator |
| 853 | : public PersistentMemoryAllocator { |
| 854 | public: |
| 855 | WritableSharedPersistentMemoryAllocator( |
| 856 | base::WritableSharedMemoryMapping memory, |
| 857 | uint64_t id, |
Helmut Januschka | 274c380 | 2024-03-12 23:31:29 | [diff] [blame] | 858 | std::string_view name); |
Peter Boström | 7319bbd | 2021-09-15 22:59:38 | [diff] [blame] | 859 | |
| 860 | WritableSharedPersistentMemoryAllocator( |
| 861 | const WritableSharedPersistentMemoryAllocator&) = delete; |
| 862 | WritableSharedPersistentMemoryAllocator& operator=( |
| 863 | const WritableSharedPersistentMemoryAllocator&) = delete; |
| 864 | |
Alexandr Ilin | 027ca3d3 | 2019-02-12 18:37:33 | [diff] [blame] | 865 | ~WritableSharedPersistentMemoryAllocator() override; |
| 866 | |
| 867 | // Ensure that the memory isn't so invalid that it would crash when passing it |
| 868 | // to the allocator. This doesn't guarantee the data is valid, just that it |
| 869 | // won't cause the program to abort. The existing IsCorrupt() call will handle |
| 870 | // the rest. |
| 871 | static bool IsSharedMemoryAcceptable( |
| 872 | const base::WritableSharedMemoryMapping& memory); |
| 873 | |
| 874 | private: |
| 875 | base::WritableSharedMemoryMapping shared_memory_; |
Alexandr Ilin | 027ca3d3 | 2019-02-12 18:37:33 | [diff] [blame] | 876 | }; |
| 877 | |
| 878 | // This allocator takes a read-only shared memory mapping object and performs |
| 879 | // allocation from it. The allocator takes ownership of the mapping object. |
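//
// A usage sketch, assuming `region` is a base::ReadOnlySharedMemoryRegion
// received from another process (id and name are hypothetical):
//
//   base::ReadOnlySharedMemoryMapping mapping = region.Map();
//   if (mapping.IsValid() &&
//       base::ReadOnlySharedPersistentMemoryAllocator::
//           IsSharedMemoryAcceptable(mapping)) {
//     base::ReadOnlySharedPersistentMemoryAllocator allocator(
//         std::move(mapping), /*id=*/0, "ExampleReadOnly");
//   }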
| 880 | class BASE_EXPORT ReadOnlySharedPersistentMemoryAllocator |
| 881 | : public PersistentMemoryAllocator { |
| 882 | public: |
| 883 | ReadOnlySharedPersistentMemoryAllocator( |
| 884 | base::ReadOnlySharedMemoryMapping memory, |
| 885 | uint64_t id, |
Helmut Januschka | 274c380 | 2024-03-12 23:31:29 | [diff] [blame] | 886 | std::string_view name); |
Peter Boström | 7319bbd | 2021-09-15 22:59:38 | [diff] [blame] | 887 | |
| 888 | ReadOnlySharedPersistentMemoryAllocator( |
| 889 | const ReadOnlySharedPersistentMemoryAllocator&) = delete; |
| 890 | ReadOnlySharedPersistentMemoryAllocator& operator=( |
| 891 | const ReadOnlySharedPersistentMemoryAllocator&) = delete; |
| 892 | |
Alexandr Ilin | 027ca3d3 | 2019-02-12 18:37:33 | [diff] [blame] | 893 | ~ReadOnlySharedPersistentMemoryAllocator() override; |
| 894 | |
| 895 | // Ensure that the memory isn't so invalid that it would crash when passing it |
| 896 | // to the allocator. This doesn't guarantee the data is valid, just that it |
| 897 | // won't cause the program to abort. The existing IsCorrupt() call will handle |
| 898 | // the rest. |
| 899 | static bool IsSharedMemoryAcceptable( |
| 900 | const base::ReadOnlySharedMemoryMapping& memory); |
| 901 | |
| 902 | private: |
| 903 | base::ReadOnlySharedMemoryMapping shared_memory_; |
Alexandr Ilin | 027ca3d3 | 2019-02-12 18:37:33 | [diff] [blame] | 904 | }; |
bcwhite | 5451c58 | 2016-02-12 18:47:15 | [diff] [blame] | 905 | |
Xiaohan Wang | 7b270e34 | 2022-01-15 19:36:03 | [diff] [blame] | 906 | // NaCl builds don't support any kind of file access.
| 907 | #if !BUILDFLAG(IS_NACL) |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 908 | // This allocator takes a memory-mapped file object and performs allocation |
bcwhite | 34229a8 | 2016-05-26 23:24:32 | [diff] [blame] | 909 | // from it. The allocator takes ownership of the file object. |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 910 | class BASE_EXPORT FilePersistentMemoryAllocator |
| 911 | : public PersistentMemoryAllocator { |
| 912 | public: |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 913 | // A `max_size` of zero will use the length of the file as the maximum |
| 914 | // size. The `file` object must have been already created with sufficient |
bcwhite | 34229a8 | 2016-05-26 23:24:32 | [diff] [blame] | 915 | // permissions (read, read/write, or read/write/extend). |
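// For example (file name and id are hypothetical; error handling omitted):
//
//   auto file = std::make_unique<base::MemoryMappedFile>();
//   if (file->Initialize(
//           base::FilePath(FILE_PATH_LITERAL("example.pma"))) &&
//       base::FilePersistentMemoryAllocator::IsFileAcceptable(
//           *file, /*read_only=*/true)) {
//     base::FilePersistentMemoryAllocator allocator(
//         std::move(file), /*max_size=*/0, /*id=*/0, "ExampleFile",
//         base::PersistentMemoryAllocator::AccessMode::kReadOnly);
//   }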
dcheng | 093de9b | 2016-04-04 21:25:51 | [diff] [blame] | 916 | FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file, |
bcwhite | 34229a8 | 2016-05-26 23:24:32 | [diff] [blame] | 917 | size_t max_size, |
dcheng | 093de9b | 2016-04-04 21:25:51 | [diff] [blame] | 918 | uint64_t id, |
Helmut Januschka | 274c380 | 2024-03-12 23:31:29 | [diff] [blame] | 919 | std::string_view name, |
Alexei Svitkine | 50fa812 | 2023-08-29 21:01:58 | [diff] [blame] | 920 | AccessMode access_mode); |
Peter Boström | 7319bbd | 2021-09-15 22:59:38 | [diff] [blame] | 921 | |
| 922 | FilePersistentMemoryAllocator(const FilePersistentMemoryAllocator&) = delete; |
| 923 | FilePersistentMemoryAllocator& operator=( |
| 924 | const FilePersistentMemoryAllocator&) = delete; |
| 925 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 926 | ~FilePersistentMemoryAllocator() override; |
| 927 | |
bcwhite | cd4923d | 2016-09-23 18:30:03 | [diff] [blame] | 928 | // Ensure that the file isn't so invalid that it would crash when passing it |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 929 | // to the allocator. This doesn't guarantee the file is valid, just that it |
bcwhite | 5451c58 | 2016-02-12 18:47:15 | [diff] [blame] | 930 | // won't cause the program to abort. The existing IsCorrupt() call will handle |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 931 | // the rest. |
bcwhite | 34229a8 | 2016-05-26 23:24:32 | [diff] [blame] | 932 | static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only); |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 933 | |
Brian White | ce24fb3 | 2018-09-18 18:45:51 | [diff] [blame] | 934 | // Load all or a portion of the file into memory for fast access. This can |
| 935 | // be used to force the disk access to be done on a background thread and |
| 936 | // then have the data available to be read on the main thread with a greatly |
| 937 | // reduced risk of blocking due to I/O. The risk isn't eliminated completely |
| 938 | // because the system could always release the memory when under pressure,
| 939 | // but that is true of any block of memory (i.e. it can be swapped out).
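//
// For example, a sketch that warms the mapping off the main thread,
// assuming `allocator` is a FilePersistentMemoryAllocator* that outlives
// the task:
//
//   base::ThreadPool::PostTask(
//       FROM_HERE, {base::MayBlock()},
//       base::BindOnce(&base::FilePersistentMemoryAllocator::Cache,
//                      base::Unretained(allocator)));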
| 940 | void Cache(); |
| 941 | |
bcwhite | 42561dc | 2017-03-16 18:35:24 | [diff] [blame] | 942 | protected: |
| 943 | // PersistentMemoryAllocator: |
| 944 | void FlushPartial(size_t length, bool sync) override; |
| 945 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 946 | private: |
dcheng | 093de9b | 2016-04-04 21:25:51 | [diff] [blame] | 947 | std::unique_ptr<MemoryMappedFile> mapped_file_; |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 948 | }; |
Xiaohan Wang | 7b270e34 | 2022-01-15 19:36:03 | [diff] [blame] | 949 | #endif // !BUILDFLAG(IS_NACL) |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 950 | |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 951 | // An allocation that is defined but not executed until required at a later |
| 952 | // time. This allows for potential users of an allocation to be decoupled |
| 953 | // from the logic that defines it. In addition, there can be multiple users |
| 954 | // of the same allocation or any region thereof that are guaranteed to always |
| 955 | // use the same space. It's okay to copy/move these objects. |
| 956 | // |
| 957 | // This is a top-level class instead of an inner class of the PMA so that it |
| 958 | // can be forward-declared in other header files without the need to include |
| 959 | // the full contents of this file. |
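//
// A construction sketch (type id and size are hypothetical; `allocator` is
// a PersistentMemoryAllocator* that outlives this object):
//
//   static std::atomic<base::PersistentMemoryAllocator::Reference> g_ref{0};
//   base::DelayedPersistentAllocation delayed(
//       allocator, &g_ref, /*type=*/0x5678, /*size=*/sizeof(uint32_t) * 4);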
| 960 | class BASE_EXPORT DelayedPersistentAllocation { |
| 961 | public: |
| 962 | using Reference = PersistentMemoryAllocator::Reference; |
| 963 | |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 964 | // Creates a delayed allocation using the specified `allocator`. When |
| 965 | // needed, the memory will be allocated using the specified `type` and |
| 966 | // `size`. If `offset` is given, the returned pointer will be at that |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 967 | // offset into the segment; this allows combining allocations into a |
| 968 | // single persistent segment to reduce overhead and makes the request "all
| 969 | // or nothing". Note that `size` is always the total memory size
| 970 | // and `offset` just indicates the start of a block within it.
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 971 | // |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 972 | // Once allocated, a reference to the segment will be stored at `ref`. |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 973 | // This shared location must be initialized to zero (0); it is checked |
| 974 | // with every Get() request to see if the allocation has already been |
Roger McFarlane | 23479ae | 2024-12-10 20:31:23 | [diff] [blame] | 975 | // done. If reading `ref` outside of this object, be sure to do an |
bcwhite | 17aab96 | 2017-05-15 16:43:29 | [diff] [blame] | 976 | // "acquire" load. Don't write to it -- leave that to this object. |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 977 | DelayedPersistentAllocation(PersistentMemoryAllocator* allocator, |
| 978 | std::atomic<Reference>* ref, |
| 979 | uint32_t type, |
| 980 | size_t size, |
Alexei Svitkine | 86a6972 | 2023-03-16 18:43:37 | [diff] [blame] | 981 | size_t offset = 0); |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 982 | ~DelayedPersistentAllocation(); |
| 983 | |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 984 | // Gets a span to the defined allocation. This will realize the request |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 985 | // and update the reference provided during construction. The memory will |
| 986 | // be zeroed the first time it is returned; after that, it is shared with
| 987 | // all other Get() requests and so shows any changes made to it elsewhere. |
| 988 | // |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 989 | // If the allocation fails for any reason, an empty span will be returned. |
| 990 | // This works even on "const" objects because the allocation is already |
| 991 | // defined, just delayed. |
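//
// For example, continuing the construction sketch above:
//
//   base::span<uint32_t> data = delayed.Get<uint32_t>();
//   if (!data.empty()) {
//     data[0] = 42;  // Visible to every other user of this allocation.
//   }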
| 992 | template <typename T> |
| 993 | span<T> Get() const { |
| 994 | // PersistentMemoryAllocator only supports types with alignment at most |
| 995 | // kAllocAlignment. |
| 996 | static_assert(alignof(T) <= PersistentMemoryAllocator::kAllocAlignment); |
| 997 | // The offset must be a multiple of the alignment or misaligned pointers |
| 998 | // will result. |
| 999 | CHECK_EQ(offset_ % alignof(T), 0u); |
| 1000 | span<uint8_t> untyped = GetUntyped(); |
Tom Sepez | e6f11a55 | 2025-02-18 19:01:51 | [diff] [blame] | 1001 | return UNSAFE_TODO( |
| 1002 | span(reinterpret_cast<T*>(untyped.data()), untyped.size() / sizeof(T))); |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 1003 | } |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 1004 | |
| 1005 | // Gets the internal reference value. If this returns a non-zero value then |
| 1006 | // a subsequent call to Get() will do nothing but convert that reference into |
| 1007 | // a memory location -- useful for accessing an existing allocation without |
| 1008 | // creating one unnecessarily. |
| 1009 | Reference reference() const { |
| 1010 | return reference_->load(std::memory_order_relaxed); |
| 1011 | } |
| 1012 | |
| 1013 | private: |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 1014 | span<uint8_t> GetUntyped() const; |
| 1015 | |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 1016 | // The underlying object that does the actual allocation of memory. Its |
| 1017 | // lifetime must exceed that of all DelayedPersistentAllocation objects |
| 1018 | // that use it. |
Rushan Suleymanov | feb5185c | 2023-10-30 19:02:33 | [diff] [blame] | 1019 | const raw_ptr<PersistentMemoryAllocator> allocator_; |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 1020 | |
| 1021 | // The desired type and size of the allocated segment plus the offset |
| 1022 | // within it for the defined request. |
| 1023 | const uint32_t type_; |
bcwhite | 4e72a15 | 2017-07-04 21:13:15 | [diff] [blame] | 1024 | const uint32_t size_; |
| 1025 | const uint32_t offset_; |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 1026 | |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 1027 | // The location at which a reference to the allocated segment is to be |
| 1028 | // stored once the allocation is complete. If multiple delayed allocations |
| 1029 | // share the same pointer then an allocation on one will amount to an |
| 1030 | // allocation for all. |
Rushan Suleymanov | feb5185c | 2023-10-30 19:02:33 | [diff] [blame] | 1031 | const raw_ptr<volatile std::atomic<Reference>, AllowPtrArithmetic> reference_; |
bcwhite | 1166f8d | 2017-04-21 17:19:03 | [diff] [blame] | 1032 | |
| 1033 | // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects. |
| 1034 | }; |
| 1035 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 1036 | } // namespace base |
| 1037 | |
| 1038 | #endif // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_ |