// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_

#include <stdint.h>

#include <atomic>
#include <memory>
#include <string_view>
#include <type_traits>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/check.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/shared_memory_mapping.h"
#include "build/build_config.h"

namespace metrics {
class FileMetricsProvider;
}  // namespace metrics

namespace base {

class HistogramBase;
class MemoryMappedFile;

// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
// under base/metrics because it was written for that purpose. It is,
// however, fully general-purpose and can be freely moved to base/memory
// if other uses are found.
//
// This class provides for thread-secure (i.e. safe against other threads
// or processes that may be compromised and thus have malicious intent)
// allocation of memory within a designated block and also a mechanism by
// which other threads can learn of these allocations.
//
// There is (currently) no way to release an allocated block of data because
// doing so would risk invalidating pointers held by other processes and
// greatly complicate the allocation algorithm.
//
// Construction of this object can accept new, clean (i.e. zeroed) memory
// or previously initialized memory. In the first case, construction must
// be allowed to complete before letting other allocators attach to the same
// segment. In other words, don't share the segment until at least one
// allocator has been attached to it.
//
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
//
// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
// character arrays and manipulating that memory manually, the better way is
// generally to use the "object" methods to create and manage allocations. In
// this way the sizing, type-checking, and construction are all automatic. For
// this to work, however, every type of stored object must define two public
// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
//
// struct MyPersistentObjectType {
//     // SHA1(MyPersistentObjectType): Increment this if structure changes!
//     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
//
//     // Expected size for 32/64-bit check. Update this if structure changes!
//     static constexpr size_t kExpectedInstanceSize = 20;
//
//     ...
// };
//
79// kPersistentTypeId: This value is an arbitrary identifier that allows the
80// identification of these objects in the allocator, including the ability
81// to find them via iteration. The number is arbitrary but using the first
82// four bytes of the SHA1 hash of the type name means that there shouldn't
83// be any conflicts with other types that may also be stored in the memory.
84// The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
85// be used to generate the hash if the type name seems common. Use a command
86// like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
87// If the structure layout changes, ALWAYS increment this number so that
88// newer versions of the code don't try to interpret persistent data written
89// by older versions with a different layout.
90//
91// kExpectedInstanceSize: This value is the hard-coded number that matches
92// what sizeof(T) would return. By providing it explicitly, the allocator can
93// verify that the structure is compatible between both 32-bit and 64-bit
94// versions of the code.
95//
// Using New manages the memory and then calls the default constructor for the
// object. Given that objects are persistent, no destructor is ever called
// automatically, though a caller can explicitly call Delete to destruct it and
// change the type to something indicating it is no longer in use.
//
// Though persistent memory segments are transferable between programs built
// for different natural word widths, they CANNOT be exchanged between CPUs
// of different endianness. Attempts to do so will simply see the existing data
// as corrupt and refuse to access any of it.
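//
// A minimal lifecycle sketch, assuming the MyPersistentObjectType above and
// an already-constructed `allocator` (names are illustrative only):
//
//   MyPersistentObjectType* obj = allocator->New<MyPersistentObjectType>();
//   if (obj) {
//     allocator->MakeIterable(obj);  // Optional: expose it to iterators.
//     ...
//     allocator->Delete(obj);  // Destructs and retires the object.
//   }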
class BASE_EXPORT PersistentMemoryAllocator {
 public:
  typedef uint32_t Reference;

  // All allocations and data-structures must be aligned to this byte boundary.
  // Alignment as large as the physical bus between CPU and RAM is _required_
  // for some architectures, is simply more efficient on other CPUs, and
  // generally a Good Idea(tm) for all platforms as it reduces/eliminates the
  // chance that a type will span cache lines. Alignment mustn't be less
  // than 8 to ensure proper alignment for all types. The rest is a balance
  // between reducing spans across multiple cache lines and wasted space spent
  // padding out allocations. An alignment of 16 would ensure that the block
  // header structure always sits in a single cache line. An average of about
  // 1/2 this value will be wasted with every allocation.
  static constexpr size_t kAllocAlignment = 8;

  // These states are used to indicate the overall condition of the memory
  // segment irrespective of what is stored within it. Because the data is
  // often persistent and thus needs to be readable by different versions of
  // a program, these values are fixed and can never change.
  enum MemoryState : uint8_t {
    // Persistent memory starts all zeros and so shows "uninitialized".
    MEMORY_UNINITIALIZED = 0,

    // The header has been written and the memory is ready for use.
    MEMORY_INITIALIZED = 1,

    // The data should be considered deleted. This would be set when the
    // allocator is being cleaned up. If file-backed, the file is likely
    // to be deleted but since deletion can fail for a variety of reasons,
    // having this extra status means a future reader can realize what
    // should have happened.
    MEMORY_DELETED = 2,

    // The data should be considered complete. This is usually set when the
    // browser is going to exit to indicate that it terminated cleanly and that
    // the memory should be well-formed. In theory, this is not perfect as it
    // is possible for the browser/device to crash after this has been set, but
    // in practice this should be a reasonable indication as to whether the
    // data comes from a completed or crashed session (if file-backed). Note
    // that this might not be set on certain platforms (e.g. Android, iOS) due
    // to not having a guaranteed clean shutdown path.
    MEMORY_COMPLETED = 3,

    // Outside code can create states starting with this number; these too
    // must never change between code versions.
    MEMORY_USER_DEFINED = 100,
  };

  // Iterator for going through all iterable memory records in an allocator.
  // Like the allocator itself, iterators are lock-free and thread-secure.
  // That means that multiple threads can share an iterator and the same
  // reference will not be returned twice.
  //
  // The order of the items returned by an iterator matches the order in which
  // MakeIterable() was called on them. Once an allocation is made iterable,
  // it remains iterable, so the only possible difference between successive
  // iterations is for more records to be added to the end.
  //
  // Iteration, in general, is tolerant of corrupted memory. It will return
  // what it can and stop only when corruption forces it to. Bad corruption
  // could cause the same object to be returned many times but it will
  // eventually quit.
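  //
  // A minimal iteration sketch, assuming an existing `allocator` and the
  // MyPersistentObjectType example from above (names are illustrative only):
  //
  //   PersistentMemoryAllocator::Iterator iter(allocator);
  //   uint32_t type;
  //   while (PersistentMemoryAllocator::Reference ref = iter.GetNext(&type)) {
  //     if (type == MyPersistentObjectType::kPersistentTypeId) {
  //       const MyPersistentObjectType* obj =
  //           iter.GetAsObject<MyPersistentObjectType>(ref);
  //       ...
  //     }
  //   }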
  class BASE_EXPORT Iterator {
   public:
    // Constructs an iterator on a given `allocator`, starting at the
    // beginning. The allocator must live beyond the lifetime of the iterator.
    // This class has read-only access to the allocator (hence "const") but
    // the returned references can be used on a read/write version, too.
    explicit Iterator(const PersistentMemoryAllocator* allocator);

    // As above but resuming from the `starting_after` reference. The first
    // call to GetNext() will return the next object found after that
    // reference. The reference must be to an "iterable" object; references to
    // non-iterable objects (those that never had MakeIterable() called for
    // them) will cause a run-time error.
    Iterator(const PersistentMemoryAllocator* allocator,
             Reference starting_after);

    Iterator(const Iterator&) = delete;
    Iterator& operator=(const Iterator&) = delete;

    ~Iterator();

    // Resets the iterator back to the beginning.
    void Reset();

    // Resets the iterator, resuming from the `starting_after` reference.
    void Reset(Reference starting_after);

    // Returns the previously retrieved reference, or kReferenceNull if none.
    // If constructed or reset with a `starting_after` reference, this will
    // return that value.
    Reference GetLast();

    // Gets the next iterable, storing its type in `type_return`. The actual
    // return value is a reference to the allocation inside the allocator or
    // zero if there are no more. GetNext() may still be called again at a
    // later time to retrieve any new allocations that have been added.
    Reference GetNext(uint32_t* type_return, size_t* alloc_size = nullptr);

    // Similar to above but gets the next iterable of a specific `type_match`.
    // This should not be mixed with calls to GetNext() because any allocations
    // skipped here due to a type mis-match will never be returned by later
    // calls to GetNext(), meaning it's possible to completely miss entries.
    Reference GetNextOfType(uint32_t type_match, size_t* alloc_size = nullptr);

    // As above but works using object type.
    template <typename T>
    Reference GetNextOfType() {
      return GetNextOfType(T::kPersistentTypeId);
    }

    // As above but works using objects and returns null if not found.
    template <typename T>
    const T* GetNextOfObject() {
      return GetAsObject<T>(GetNextOfType<T>());
    }

    // Converts references to objects. This is a convenience method so that
    // users of the iterator don't need to also have their own pointer to the
    // allocator over which the iterator runs in order to retrieve objects.
    // Because the iterator is not read/write, only "const" objects can be
    // fetched. Non-const objects can be fetched using the reference on a
    // non-const (external) pointer to the same allocator (or use const_cast
    // to remove the qualifier).
    template <typename T>
    const T* GetAsObject(Reference ref) const {
      return allocator_->GetAsObject<T>(ref);
    }

    // Similar to GetAsObject() but converts references to arrays of things.
    template <typename T>
    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
      return allocator_->GetAsArray<T>(ref, type_id, count);
    }

    // Converts a generic pointer back into a reference. A null reference will
    // be returned if `memory` is not inside the persistent segment or does
    // not point to an object of the specified `type_id`.
    Reference GetAsReference(const void* memory, uint32_t type_id) const {
      return allocator_->GetAsReference(memory, type_id);
    }

    // As above but converts an object back into a reference.
    template <typename T>
    Reference GetAsReference(const T* obj) const {
      return allocator_->GetAsReference(obj);
    }

   private:
    // Weak-pointer to memory allocator being iterated over.
    raw_ptr<const PersistentMemoryAllocator> allocator_;

    // The last record that was returned.
    std::atomic<Reference> last_record_;

    // The number of records found; used for detecting loops.
    std::atomic<uint32_t> record_count_;
  };

  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;
    size_t free;
  };

  enum : Reference {
    // A common "null" reference value.
    kReferenceNull = 0,
  };

  enum : uint32_t {
    // A value that will match any type when doing lookups.
    kTypeIdAny = 0x00000000,

    // A value indicating that the type is in transition. Work is being done
    // on the contents to prepare it for a new type to come.
    kTypeIdTransitioning = 0xFFFFFFFF,
  };

  enum : size_t {
    kSizeAny = 1  // Constant indicating that any array size is acceptable.
  };

  // Indicates the mode for accessing the underlying data.
  enum AccessMode {
    kReadOnly,
    kReadWrite,
    // Open existing initialized data in R/W mode. If the passed data appears
    // to not have been initialized, does not write to it and instead marks
    // the allocator as corrupt (without writing anything to the underlying
    // data).
    kReadWriteExisting,
  };

  // This is the standard file extension (suitable for being passed to the
  // AddExtension() method of base::FilePath) for dumps of persistent memory.
  static const base::FilePath::CharType kFileExtension[];

  // The allocator operates on any arbitrary block of memory. Creation and
  // persisting or sharing of that block with another process is the
  // responsibility of the caller. The allocator needs to know only the
  // block's `base` address, the total `size` of the block, and any internal
  // `page` size (zero if not paged) across which allocations should not span.
  // The `id` is an arbitrary value the caller can use to identify a
  // particular memory segment. It will only be loaded during the initial
  // creation of the segment and can be checked by the caller for consistency.
  // The `name`, if provided, is used to distinguish histograms for this
  // allocator. Only the primary owner of the segment should define this value;
  // other processes can learn it from the shared state. If the access mode
  // is kReadOnly then no changes will be made to it. The resulting object
  // should be stored as a "const" pointer.
  //
  // PersistentMemoryAllocator does NOT take ownership of the memory block.
  // The caller must manage it and ensure it stays available throughout the
  // lifetime of this object.
  //
  // Memory segments for sharing must have had an allocator attached to them
  // before actually being shared. If the memory segment was just created, it
  // should be zeroed before being passed here. If it was an existing segment,
  // the values here will be compared to copies stored in the shared segment
  // as a guard against corruption.
  //
  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
  // method below) before construction if the definition of the segment can
  // vary in any way at run-time. Invalid memory segments will cause a crash.
  PersistentMemoryAllocator(void* base,
                            size_t size,
                            size_t page_size,
                            uint64_t id,
                            std::string_view name,
                            AccessMode access_mode);

  PersistentMemoryAllocator(const PersistentMemoryAllocator&) = delete;
  PersistentMemoryAllocator& operator=(const PersistentMemoryAllocator&) =
      delete;

  virtual ~PersistentMemoryAllocator();

  // Checks if memory segment is acceptable for creation of an Allocator. This
  // doesn't do any analysis of the data and so doesn't guarantee that the
  // contents are valid, just that the parameters won't cause the program to
  // abort. The IsCorrupt() method will report detection of data problems
  // found during construction and general operation.
  static bool IsMemoryAcceptable(const void* data,
                                 size_t size,
                                 size_t page_size,
                                 bool readonly);

  // Returns the internal identifier for this persistent memory segment.
  uint64_t Id() const;

  // Returns the internal name of this allocator (possibly an empty string).
  // The returned string_view references a bounded span within the shared
  // memory region. As such, it should be treated as a volatile but bounded
  // block of memory. In particular, clients should respect the 'length()' of
  // the returned view instead of relying on a terminating NUL char.
  std::string_view Name() const;

  // Is this segment open only for read?
  bool IsReadonly() const { return access_mode_ == kReadOnly; }

  // Manage the saved state of the memory.
  void SetMemoryState(uint8_t memory_state);
  uint8_t GetMemoryState() const;

  // Creates internal histograms for tracking memory use and allocation sizes
  // for allocator of `name` (which can simply be the result of Name()). This
  // is done separately from construction for situations such as when the
  // histograms will be backed by memory provided by this very allocator.
  //
  // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must
  // be updated with the following histograms for each `name` param:
  //    UMA.PersistentAllocator.name.Errors
  //    UMA.PersistentAllocator.name.UsedPct
  void CreateTrackingHistograms(std::string_view name);

  // Flushes the persistent memory to any backing store. This typically does
  // nothing but is used by the FilePersistentMemoryAllocator to inform the
  // OS that all the data should be sent to the disk immediately. This is
  // useful in the rare case where something has just been stored that needs
  // to survive a hard shutdown of the machine like from a power failure.
  // The `sync` parameter indicates if this call should block until the flush
  // is complete, but it is only advisory and may or may not have an effect
  // depending on the capabilities of the OS. Synchronous flushes are allowed
  // only from threads that are allowed to do I/O but, since `sync` is only
  // advisory, all flushes should be done on IO-capable threads.
  // TODO: Since `sync` is ignored on Windows, consider making it re-post on a
  // background thread with `sync` set to true so that `sync` is not just
  // advisory.
  void Flush(bool sync);

  // Direct access to underlying memory segment. If the segment is shared
  // across threads or processes, reading data through these values does
  // not guarantee consistency. Use with care. Do not write.
  const void* data() const { return const_cast<const char*>(mem_base_); }
  size_t length() const { return mem_size_; }
  size_t size() const { return mem_size_; }
  size_t page_size() const { return mem_page_; }
  size_t used() const;

  // Returns the object referenced by `ref`. For safety reasons, the `type_id`
  // code and sizeof(T) are compared to ensure the reference is valid
  // and cannot return an object outside of the memory segment. A `type_id` of
  // kTypeIdAny (zero) will match any type though the size is still checked.
  // NULL is returned if any problem is detected, such as corrupted storage or
  // incorrect parameters. Callers MUST check that the returned value is
  // not-null EVERY TIME before accessing it or risk crashing! Once
  // dereferenced, the pointer is safe to reuse forever.
  //
  // It is essential that the object be of a fixed size. All fields must be of
  // a defined type that does not change based on the compiler or the CPU
  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
  // Unacceptable are int, bool, and wchar_t which are implementation defined
  // with regards to their size.
  //
  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
  // differently between 32 and 64 bit architectures. Either put the bigger
  // elements first, group smaller elements into blocks the size of larger
  // elements, or manually insert padding fields as appropriate for the
  // largest architecture, including at the end.
  //
  // To protect against mistakes, all objects must have the attribute
  // `kExpectedInstanceSize` (static constexpr size_t) that is a hard-coded
  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
  // instance size is not fixed, at least one build will fail.
  //
  // If the size of a structure changes, the type-ID used to recognize it
  // should also change so later versions of the code don't try to read
  // incompatible structures from earlier versions.
  //
  // NOTE: Though this method will guarantee that an object of the specified
  // type can be accessed without going outside the bounds of the memory
  // segment, it makes no guarantees of the validity of the data within the
  // object itself. If it is expected that the contents of the segment could
  // be compromised with malicious intent, the object must be hardened as well.
  //
  // Though the persistent data may be "volatile" if it is shared with
  // other processes, such is not necessarily the case. The internal
  // "volatile" designation is discarded so as to not propagate the viral
  // nature of that keyword to the caller. It can add it back, if necessary,
  // based on knowledge of how the allocator is being used.
  template <typename T>
  T* GetAsObject(Reference ref, size_t* alloc_size = nullptr) {
    static_assert(std::is_standard_layout_v<T>, "only standard objects");
    static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T), alloc_size)));
  }
  template <typename T>
  const T* GetAsObject(Reference ref, size_t* alloc_size = nullptr) const {
    static_assert(std::is_standard_layout_v<T>, "only standard objects");
    static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T), alloc_size)));
  }

  // Like GetAsObject() but returns an array of simple, fixed-size types.
  //
  // Use a `count` of the required number of array elements, or kSizeAny.
  // The optionally returned `alloc_size` can be used to calculate the upper
  // bound but isn't reliable because padding can make space for extra elements
  // that were not written.
  //
  // Remember that an array of char is a string but may not be NUL terminated.
  //
  // There are no compile-time or run-time checks to ensure 32/64-bit size
  // compatibility when using these accessors. Only use fixed-size types such
  // as char, float, double, or (u)intXX_t.
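  //
  // A minimal array sketch, assuming an existing `allocator`; the type id
  // value is illustrative only:
  //
  //   constexpr uint32_t kMyDataType = 0x0DA7A123;  // Hypothetical type id.
  //   Reference ref = allocator->Allocate(16 * sizeof(uint32_t), kMyDataType);
  //   uint32_t* values =
  //       allocator->GetAsArray<uint32_t>(ref, kMyDataType, 16);
  //   if (values) {
  //     values[0] = 42;
  //   }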
  template <typename T>
  T* GetAsArray(Reference ref,
                uint32_t type_id,
                size_t count,
                size_t* alloc_size = nullptr) {
    static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T), alloc_size)));
  }
  template <typename T>
  const T* GetAsArray(Reference ref,
                      uint32_t type_id,
                      size_t count,
                      size_t* alloc_size = nullptr) const {
    static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T), alloc_size)));
  }

  // Gets the corresponding reference for an object held in persistent memory.
  // If the `memory` is not valid or the type does not match, a kReferenceNull
  // result will be returned.
  Reference GetAsReference(const void* memory, uint32_t type_id) const;

  // Accesses the internal "type" of an object. This generally isn't necessary
  // but can be used to "clear" the type and so effectively mark it as deleted
  // even though the memory stays valid and allocated. Changing the type is
  // an atomic compare/exchange and so requires knowing the existing value.
  // It will return false if the existing type is not what is expected.
  //
  // Changing the type doesn't mean the data is compatible with the new type.
  // Passing true for `clear` will zero the memory after the type has been
  // changed away from `from_type_id` but before it becomes `to_type_id`,
  // meaning that it is done in a manner that is thread-safe. Memory is
  // guaranteed to be zeroed atomically by machine-word in a monotonically
  // increasing order.
  //
  // It will likely be necessary to reconstruct the type before it can be used.
  // Changing the type WILL NOT invalidate existing pointers to the data,
  // either in this process or others, so changing the data structure could
  // have unpredictable results. USE WITH CARE!
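  //
  // A minimal type-change sketch; the type id values are illustrative only:
  //
  //   if (allocator->ChangeType(ref, /*to_type_id=*/0x2, /*from_type_id=*/0x1,
  //                             /*clear=*/true)) {
  //     // `ref` now refers to zeroed memory tagged with type 0x2.
  //   }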
  uint32_t GetType(Reference ref) const;
  bool ChangeType(Reference ref,
                  uint32_t to_type_id,
                  uint32_t from_type_id,
                  bool clear);

  // Allocated objects can be added to an internal list that can then be
  // iterated over by other processes. If an allocated object can be found
  // another way, such as by having its reference within a different object
  // that will be made iterable, then this call is not necessary. This always
  // succeeds unless corruption is detected; check IsCorrupt() to find out.
  // Once an object is made iterable, its position in iteration can never
  // change; new iterable objects will always be added after it in the series.
  // Changing the type does not alter its "iterable" status.
  void MakeIterable(Reference ref);

  // Gets the information about the amount of free space in the allocator. The
  // amount of free space should be treated as approximate due to extras from
  // alignment and metadata. Concurrent allocations from other threads will
  // also make the true amount less than what is reported.
  void GetMemoryInfo(MemoryInfo* meminfo) const;

  // If there is some indication that the memory has become corrupted,
  // calling this will attempt to prevent further damage by indicating to
  // all processes that something is not as expected.
  // If `allow_write` is false, the corrupt bit will not be written to the
  // data.
  void SetCorrupt(bool allow_write = true) const;

  // This can be called to determine if corruption has been detected in the
  // segment, possibly by a malicious actor. Once detected, future allocations
  // will fail and iteration may not locate all objects.
  bool IsCorrupt() const;

  // Flag set if an allocation has failed because the memory segment was full.
  bool IsFull() const;

  // Updates those "tracking" histograms which do not get updates during
  // regular operation, such as how much memory is currently used. This should
  // be called before such information is to be displayed or uploaded.
  void UpdateTrackingHistograms();

  // While the above works much like malloc & free, these next methods provide
  // an "object" interface similar to new and delete.

  // Reserves space in the memory segment of the desired `size` and `type_id`.
  //
  // A return value of zero indicates the allocation failed, otherwise the
  // returned reference can be used by any process to get a real pointer via
  // the GetAsObject() or GetAsArray() calls. The actual allocated size may be
  // larger and will always be a multiple of 8 bytes (64 bits).
  Reference Allocate(size_t size,
                     uint32_t type_id,
                     size_t* alloc_size = nullptr);

  // Allocates and constructs an object in persistent memory. The type must
  // have both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
  // static constexpr fields that are used to ensure compatibility between
  // software versions. An optional size parameter can be specified to force
  // the allocation to be bigger than the size of the object; this is useful
  // when the last field is actually variable length.
  template <typename T>
  T* New(size_t size) {
    static_assert(alignof(T) <= kAllocAlignment);
    if (size < sizeof(T)) {
      size = sizeof(T);
    }
    Reference ref = Allocate(size, T::kPersistentTypeId);
    void* mem =
        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
    if (!mem) {
      return nullptr;
    }
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    return new (mem) T();
  }
  template <typename T>
  T* New() {
    return New<T>(sizeof(T));
  }

  // Similar to New, above, but constructs the object out of an existing memory
  // block and of an expected type. If `clear` is true, memory will be zeroed
  // before construction. Though this is not standard object behavior, it
  // is present to match with new allocations that always come from zeroed
  // memory. Anything previously present simply ceases to exist; no destructor
  // is called for it so explicitly Delete() the old object first if need be.
  // Calling this will not invalidate existing pointers to the object, either
  // in this process or others, so changing the object could have unpredictable
  // results. USE WITH CARE!
  template <typename T>
  T* New(Reference ref, uint32_t from_type_id, bool clear) {
    // Make sure the memory is appropriate. This won't be used until after
    // the type is changed but checking first avoids the possibility of having
    // to change the type back.
    size_t alloc_size = 0;
    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T), &alloc_size));
    if (!mem) {
      return nullptr;
    }

    DCHECK_LE(sizeof(T), alloc_size) << "alloc not big enough for obj";

    // Ensure the allocator's internal alignment is sufficient for this object.
    // This protects against coding errors in the allocator.
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    // Change the type, clearing the memory if so desired. The new type is
    // "transitioning" so that there is no race condition with the construction
    // of the object should another thread be simultaneously iterating over
    // data. This will "acquire" the memory so no changes get reordered before
    // it.
    if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear)) {
      return nullptr;
    }
    // Construct an object of the desired type on this memory, just as if
    // New() had been called to create it.
    T* obj = new (mem) T();
    // Finally change the type to the desired one. This will "release" all of
    // the changes above and so provide a consistent view to other threads.
    bool success =
        ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
    DCHECK(success);
    return obj;
  }

  // Deletes an object by destructing it and then changing the type to a
  // different value (default 0).
  template <typename T>
  void Delete(T* obj, uint32_t new_type) {
    // Get the reference for the object.
    Reference ref = GetAsReference<T>(obj);
    // First change the type to "transitioning" so there is no race condition
    // where another thread could find the object through iteration while it
    // is being destructed. This will "acquire" the memory so no changes get
    // reordered before it. It will fail if `ref` is invalid.
    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false)) {
      return;
    }
    // Destruct the object.
    obj->~T();
    // Finally change the type to the desired value. This will "release" all
    // the changes above.
    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
    DCHECK(success);
  }
  template <typename T>
  void Delete(T* obj) {
    Delete<T>(obj, 0);
  }

  // As above but works with objects allocated from persistent memory.
  template <typename T>
  Reference GetAsReference(const T* obj) const {
    return GetAsReference(obj, T::kPersistentTypeId);
  }

  // As above but works with an object allocated from persistent memory.
  template <typename T>
  void MakeIterable(const T* obj) {
    MakeIterable(GetAsReference<T>(obj));
  }

  // Returns a string_view of a c-style string that is located at the end of an
  // allocated memory block. It is the caller's responsibility to know/ensure
  // that `object` is of some type that ends with a c-style string and that
  // said string begins at `offset` bytes from `object`. `alloc_size` must be
  // the size of the allocation, as returned by the allocator. If `object` is
  // `nullptr` or `offset >= alloc_size` then an empty string_view is returned.
  // Users should treat the returned view as a volatile bounded memory region;
  // it references the underlying shared memory, whose contents can be changed
  // or corrupted at any time. In particular, clients should respect the
  // 'length()' of the returned view instead of relying on a terminating NUL
  // char.
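  //
  // A minimal sketch, assuming a hypothetical `MyHeader` type whose
  // allocation ends with a c-style string immediately after its fixed fields:
  //
  //   size_t alloc_size = 0;
  //   const MyHeader* hdr = allocator->GetAsObject<MyHeader>(ref, &alloc_size);
  //   std::string_view name = PersistentMemoryAllocator::StringViewAt(
  //       hdr, sizeof(MyHeader), alloc_size);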
  static std::string_view StringViewAt(const void* object,
                                       size_t offset,
                                       size_t alloc_size);

 protected:
  enum MemoryType {
    MEM_EXTERNAL,
    MEM_MALLOC,
    MEM_VIRTUAL,
    MEM_SHARED,
    MEM_FILE,
  };

  struct Memory {
    Memory(void* b, MemoryType t) : base(b), type(t) {}

    raw_ptr<void> base;
    MemoryType type;
  };

  // Constructs the allocator. Everything is the same as the public allocator
  // except `memory` which is a structure with additional information besides
  // the base address.
  PersistentMemoryAllocator(Memory memory,
                            size_t size,
                            size_t page_size,
                            uint64_t id,
                            std::string_view name,
                            AccessMode access_mode);

  // Implementation of Flush that accepts how much to flush.
  virtual void FlushPartial(size_t length, bool sync);

  // RAW_PTR_EXCLUSION: Never allocated by PartitionAlloc (always mmap'ed), so
  // there is no benefit to using a raw_ptr, only cost.
  RAW_PTR_EXCLUSION volatile char* const
      mem_base_;                // Memory base. (char so sizeof guaranteed 1)
  const MemoryType mem_type_;   // Type of memory allocation.
  const uint32_t mem_size_;     // Size of entire memory segment.
  const uint32_t mem_page_;     // Page size allocations shouldn't cross.
  const size_t vm_page_size_;   // The page size used by the OS.

 private:
  struct SharedMetadata;
  struct BlockHeader;
  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code.
  const SharedMetadata* shared_meta() const {
    return reinterpret_cast<const SharedMetadata*>(
        const_cast<const char*>(mem_base_));
  }
  SharedMetadata* shared_meta() {
    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
  }

  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id, size_t* alloc_size);

  // Dereferences a block `ref` to retrieve a pointer to the block header for
  // the reference. This method ensures that the referenced block is valid for
  // the desired `type_id` and `size`. Optionally, if `alloc_size` is not
  // nullptr, the validated size of the underlying allocation is returned.
  //
  // Special cases for internal use only:
  //
  // * If `queue_ok` is true and `ref` is kReferenceQueue then the block
  //   header for the allocation queue is returned.
  //
  // * If `free_ok` is true then the block header is allowed to point to a
  //   block that may not be in the `allocated` state. This bypasses block
  //   validation.
  //
  // Because they bypass block validation, it is not permitted to request the
  // `alloc_size` when either of `queue_ok` or `free_ok` are true.
  const volatile BlockHeader* GetBlock(Reference ref,
                                       uint32_t type_id,
                                       size_t size,
                                       bool queue_ok,
                                       bool free_ok,
                                       size_t* alloc_size = nullptr) const;
  volatile BlockHeader* GetBlock(Reference ref,
                                 uint32_t type_id,
                                 size_t size,
                                 bool queue_ok,
                                 bool free_ok,
                                 size_t* alloc_size = nullptr) {
    return const_cast<volatile BlockHeader*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
            ref, type_id, size, queue_ok, free_ok, alloc_size));
  }

  // Gets the actual data within a block associated with a specific reference.
  const volatile void* GetBlockData(Reference ref,
                                    uint32_t type_id,
                                    size_t size,
                                    size_t* alloc_size = nullptr) const;
  volatile void* GetBlockData(Reference ref,
                              uint32_t type_id,
                              size_t size,
                              size_t* alloc_size = nullptr) {
    return const_cast<volatile void*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
            ref, type_id, size, alloc_size));
  }

  // Returns the offset to the first free space segment.
  uint32_t freeptr() const;

  // Returns the metadata version used in this allocator.
  uint32_t version() const;

  const AccessMode access_mode_;

  // Local version of "corrupted" flag.
  mutable std::atomic<bool> corrupt_ = false;

  // Histogram recording used space.
  raw_ptr<HistogramBase> used_histogram_ = nullptr;

  // TODO(crbug.com/40064026): Remove these. They are used to investigate
  // unexpected failures and code paths.
  friend class DelayedPersistentAllocation;
  friend class metrics::FileMetricsProvider;
  void DumpWithoutCrashing(Reference ref,
                           uint32_t expected_type,
                           size_t expected_size,
                           bool dump_block_header) const;

  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
};

// This allocator uses a local memory block that it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
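//
// A minimal sketch; the size, id, and name are illustrative only:
//
//   LocalPersistentMemoryAllocator allocator(64 << 10, 0x1234, "Example");
//   MyPersistentObjectType* obj = allocator.New<MyPersistentObjectType>();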
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size,
                                 uint64_t id,
                                 std::string_view name);

  LocalPersistentMemoryAllocator(const LocalPersistentMemoryAllocator&) =
      delete;
  LocalPersistentMemoryAllocator& operator=(
      const LocalPersistentMemoryAllocator&) = delete;

  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified `size`, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static Memory AllocateLocalMemory(size_t size, std::string_view name);

  // Deallocates a block of local `memory` of the specified `size`.
  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);
};

// This allocator takes a writable shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT WritableSharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  WritableSharedPersistentMemoryAllocator(
      base::WritableSharedMemoryMapping memory,
      uint64_t id,
      std::string_view name);

  WritableSharedPersistentMemoryAllocator(
      const WritableSharedPersistentMemoryAllocator&) = delete;
  WritableSharedPersistentMemoryAllocator& operator=(
      const WritableSharedPersistentMemoryAllocator&) = delete;

  ~WritableSharedPersistentMemoryAllocator() override;

  // Ensure that the memory isn't so invalid that it would crash when passing
  // it to the allocator. This doesn't guarantee the data is valid, just that
  // it won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsSharedMemoryAcceptable(
      const base::WritableSharedMemoryMapping& memory);

 private:
  base::WritableSharedMemoryMapping shared_memory_;
};

// This allocator takes a read-only shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT ReadOnlySharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  ReadOnlySharedPersistentMemoryAllocator(
      base::ReadOnlySharedMemoryMapping memory,
      uint64_t id,
      std::string_view name);

  ReadOnlySharedPersistentMemoryAllocator(
      const ReadOnlySharedPersistentMemoryAllocator&) = delete;
  ReadOnlySharedPersistentMemoryAllocator& operator=(
      const ReadOnlySharedPersistentMemoryAllocator&) = delete;

  ~ReadOnlySharedPersistentMemoryAllocator() override;

  // Ensure that the memory isn't so invalid that it would crash when passing
  // it to the allocator. This doesn't guarantee the data is valid, just that
  // it won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsSharedMemoryAcceptable(
      const base::ReadOnlySharedMemoryMapping& memory);

 private:
  base::ReadOnlySharedMemoryMapping shared_memory_;
};

// NaCl builds don't support any kind of file access.
#if !BUILDFLAG(IS_NACL)
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A `max_size` of zero will use the length of the file as the maximum
  // size. The `file` object must have been already created with sufficient
  // permissions (read, read/write, or read/write/extend).
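  //
  // A minimal construction sketch; the path, size, and id are illustrative
  // only:
  //
  //   auto mmfile = std::make_unique<base::MemoryMappedFile>();
  //   mmfile->Initialize(base::FilePath(FILE_PATH_LITERAL("data.pma")),
  //                      base::MemoryMappedFile::READ_WRITE);
  //   FilePersistentMemoryAllocator allocator(
  //       std::move(mmfile), /*max_size=*/0, /*id=*/0x1234, "Example",
  //       PersistentMemoryAllocator::kReadWrite);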
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                std::string_view name,
                                AccessMode access_mode);

  FilePersistentMemoryAllocator(const FilePersistentMemoryAllocator&) = delete;
  FilePersistentMemoryAllocator& operator=(
      const FilePersistentMemoryAllocator&) = delete;

  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

  // Load all or a portion of the file into memory for fast access. This can
  // be used to force the disk access to be done on a background thread and
  // then have the data available to be read on the main thread with a greatly
  // reduced risk of blocking due to I/O. The risk isn't eliminated completely
  // because the system could always release the memory when under pressure
  // but this can happen to any block of memory (i.e. swapped out).
  void Cache();

 protected:
  // PersistentMemoryAllocator:
  void FlushPartial(size_t length, bool sync) override;

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;
};
#endif  // !BUILDFLAG(IS_NACL)

// An allocation that is defined but not executed until required at a later
// time. This allows for potential users of an allocation to be decoupled
// from the logic that defines it. In addition, there can be multiple users
// of the same allocation or any region thereof that are guaranteed to always
// use the same space. It's okay to copy/move these objects.
//
// This is a top-level class instead of an inner class of the PMA so that it
// can be forward-declared in other header files without the need to include
// the full contents of this file.
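//
// A minimal sketch, assuming an existing `allocator`; the type id and size
// are illustrative only:
//
//   static std::atomic<PersistentMemoryAllocator::Reference> g_ref;
//   DelayedPersistentAllocation delayed(allocator, &g_ref, /*type=*/0x5EED,
//                                       /*size=*/sizeof(uint32_t) * 4);
//   span<uint32_t> counts = delayed.Get<uint32_t>();  // Allocates on 1st use.
//   if (!counts.empty()) {
//     counts[0] += 1;
//   }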
960class BASE_EXPORT DelayedPersistentAllocation {
961 public:
962 using Reference = PersistentMemoryAllocator::Reference;
963
Roger McFarlane23479ae2024-12-10 20:31:23964 // Creates a delayed allocation using the specified `allocator`. When
965 // needed, the memory will be allocated using the specified `type` and
966 // `size`. If `offset` is given, the returned pointer will be at that
bcwhite1166f8d2017-04-21 17:19:03967 // offset into the segment; this allows combining allocations into a
968 // single persistent segment to reduce overhead and means an "all or
Roger McFarlane23479ae2024-12-10 20:31:23969 // nothing" request. Note that `size` is always the total memory size
970 // and `offset` is just indicating the start of a block within it.
bcwhite1166f8d2017-04-21 17:19:03971 //
Roger McFarlane23479ae2024-12-10 20:31:23972 // Once allocated, a reference to the segment will be stored at `ref`.
bcwhite1166f8d2017-04-21 17:19:03973 // This shared location must be initialized to zero (0); it is checked
974 // with every Get() request to see if the allocation has already been
Roger McFarlane23479ae2024-12-10 20:31:23975 // done. If reading `ref` outside of this object, be sure to do an
bcwhite17aab962017-05-15 16:43:29976 // "acquire" load. Don't write to it -- leave that to this object.
bcwhite1166f8d2017-04-21 17:19:03977 DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
978 std::atomic<Reference>* ref,
979 uint32_t type,
980 size_t size,
Alexei Svitkine86a69722023-03-16 18:43:37981 size_t offset = 0);
bcwhite1166f8d2017-04-21 17:19:03982 ~DelayedPersistentAllocation();
983
  // Gets a span to the defined allocation. This will realize the request
  // and update the reference provided during construction. The memory will
  // be zeroed the first time it is returned; after that it is shared with
  // all other Get() requests and so shows any changes made to it elsewhere.
  //
  // If the allocation fails for any reason, an empty span will be returned.
  // This works even on "const" objects because the allocation is already
  // defined, just delayed.
  template <typename T>
  span<T> Get() const {
    // PersistentMemoryAllocator only supports types with alignment at most
    // kAllocAlignment.
    static_assert(alignof(T) <= PersistentMemoryAllocator::kAllocAlignment);
    // The offset must be a multiple of the alignment or misaligned pointers
    // will result.
    CHECK_EQ(offset_ % alignof(T), 0u);
    span<uint8_t> untyped = GetUntyped();
    return UNSAFE_TODO(
        span(reinterpret_cast<T*>(untyped.data()), untyped.size() / sizeof(T)));
  }

  // Gets the internal reference value. If this returns a non-zero value then
  // a subsequent call to Get() will do nothing but convert that reference into
  // a memory location -- useful for accessing an existing allocation without
  // creating one unnecessarily.
  Reference reference() const {
    return reference_->load(std::memory_order_relaxed);
  }

 private:
  span<uint8_t> GetUntyped() const;

  // The underlying object that does the actual allocation of memory. Its
  // lifetime must exceed that of all DelayedPersistentAllocation objects
  // that use it.
  const raw_ptr<PersistentMemoryAllocator> allocator_;

  // The desired type and size of the allocated segment plus the offset
  // within it for the defined request.
  const uint32_t type_;
  const uint32_t size_;
  const uint32_t offset_;

  // The location at which a reference to the allocated segment is to be
  // stored once the allocation is complete. If multiple delayed allocations
  // share the same pointer then an allocation on one will amount to an
  // allocation for all.
  const raw_ptr<volatile std::atomic<Reference>, AllowPtrArithmetic> reference_;

  // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
};

}  // namespace base

#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_