// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_

#include <stdint.h>

#include <atomic>
#include <memory>
#include <type_traits>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"

namespace base {

class HistogramBase;
class MemoryMappedFile;
class SharedMemory;

// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
// under base/metrics because it was written for that purpose. It is,
// however, fully general-purpose and can be freely moved to base/memory
// if other uses are found.
//
// This class provides for thread-secure (i.e. safe against other threads
// or processes that may be compromised and thus have malicious intent)
// allocation of memory within a designated block and also a mechanism by
// which other threads can learn of these allocations.
//
// There is (currently) no way to release an allocated block of data because
// doing so would risk invalidating pointers held by other processes and
// greatly complicate the allocation algorithm.
//
// Construction of this object can accept new, clean (i.e. zeroed) memory
// or previously initialized memory. In the first case, construction must
// be allowed to complete before letting other allocators attach to the same
// segment. In other words, don't share the segment until at least one
// allocator has been attached to it.
//
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
//
// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
// character arrays and manipulating that memory manually, the better way is
// generally to use the "Object" methods to create and manage allocations. In
// this way the sizing, type-checking, and construction are all automatic. For
// this to work, however, every type of stored object must define two public
// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
//
//   struct MyPersistentObjectType {
//     // SHA1(MyPersistentObjectType): Increment this if structure changes!
//     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
//
//     // Expected size for 32/64-bit check. Update this if structure changes!
//     static constexpr size_t kExpectedInstanceSize = 20;
//
//     ...
//   };
//
// kPersistentTypeId: This value is an arbitrary identifier that allows the
// identification of these objects in the allocator, including the ability
// to find them via iteration. The number is arbitrary but using the first
// four bytes of the SHA1 hash of the type name means that there shouldn't
// be any conflicts with other types that may also be stored in the memory.
// The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
// be used to generate the hash if the type name seems common. Use a command
// like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
// If the structure layout changes, ALWAYS increment this number so that
// newer versions of the code don't try to interpret persistent data written
// by older versions with a different layout.
//
// kExpectedInstanceSize: This value is the hard-coded number that matches
// what sizeof(T) would return. By providing it explicitly, the allocator can
// verify that the structure is compatible between both 32-bit and 64-bit
// versions of the code.
85//
86// Using AllocateObject (and ChangeObject) will zero the memory and then call
87// the default constructor for the object. Given that objects are persistent,
88// no destructor is ever called automatically though a caller can explicitly
89// call DeleteObject to destruct it and change the type to something indicating
90// it is no longer in use.
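//
// For example, a minimal object lifecycle might look as follows (a sketch
// using the hypothetical MyPersistentObjectType from above):
//
//   MyPersistentObjectType* obj =
//       allocator->AllocateObject<MyPersistentObjectType>();
//   if (obj) {
//     // ... use the object ...
//     allocator->DeleteObject(obj);  // Destructs it and sets the type to 0.
//   }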
91//
92// Though persistent memory segments are transferrable between programs built
93// for different natural word widths, they CANNOT be exchanged between CPUs
94// of different endianess. Attempts to do so will simply see the existing data
95// as corrupt and refuse to access any of it.
class BASE_EXPORT PersistentMemoryAllocator {
 public:
  typedef uint32_t Reference;

  // Iterator for going through all iterable memory records in an allocator.
  // Like the allocator itself, iterators are lock-free and thread-secure.
  // That means that multiple threads can share an iterator and the same
  // reference will not be returned twice.
  //
  // The order of the items returned by an iterator matches the order in which
  // MakeIterable() was called on them. Once an allocation is made iterable,
  // it remains so forever; the only possible difference between successive
  // iterations is that more records may have been added to the end.
  //
  // Iteration, in general, is tolerant of corrupted memory. It will return
  // what it can and stop only when corruption forces it to. Bad corruption
  // could cause the same object to be returned many times but it will
  // eventually quit.
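  //
  // For example, a sketch of a full pass over an allocator (using the
  // hypothetical MyPersistentObjectType from the file comment above):
  //
  //   PersistentMemoryAllocator::Iterator iter(allocator);
  //   uint32_t type;
  //   PersistentMemoryAllocator::Reference ref;
  //   while ((ref = iter.GetNext(&type)) != 0) {
  //     if (type == MyPersistentObjectType::kPersistentTypeId) {
  //       const MyPersistentObjectType* obj =
  //           iter.GetAsObject<MyPersistentObjectType>(ref);
  //       if (obj) {
  //         // ... read from the object ...
  //       }
  //     }
  //   }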
  class BASE_EXPORT Iterator {
   public:
    // Constructs an iterator on a given |allocator|, starting at the
    // beginning. The allocator must live beyond the lifetime of the iterator.
    // This class has read-only access to the allocator (hence "const") but
    // the returned references can be used on a read/write version, too.
    explicit Iterator(const PersistentMemoryAllocator* allocator);

    // As above but resuming from the |starting_after| reference. The first
    // call to GetNext() will return the next object found after that
    // reference. The reference must be to an "iterable" object; references
    // to non-iterable objects (those that never had MakeIterable() called
    // for them) will cause a run-time error.
    Iterator(const PersistentMemoryAllocator* allocator,
             Reference starting_after);

    // Resets the iterator back to the beginning.
    void Reset();

    // Resets the iterator, resuming from the |starting_after| reference.
    void Reset(Reference starting_after);

    // Returns the previously retrieved reference, or kReferenceNull if none.
    // If the iterator was constructed or reset with a |starting_after|
    // reference, this will return that value.
    Reference GetLast();

    // Gets the next iterable, storing that type in |type_return|. The actual
    // return value is a reference to the allocation inside the allocator or
    // zero if there are no more. GetNext() may still be called again at a
    // later time to retrieve any new allocations that have been added.
    Reference GetNext(uint32_t* type_return);

    // Similar to above but gets the next iterable of a specific |type_match|.
    // This should not be mixed with calls to GetNext() because any allocations
    // skipped here due to a type mismatch will never be returned by later
    // calls to GetNext(), meaning it's possible to completely miss entries.
    Reference GetNextOfType(uint32_t type_match);

    // As above but works using object type.
    template <typename T>
    Reference GetNextOfType() {
      return GetNextOfType(T::kPersistentTypeId);
    }

    // As above but works using objects and returns null if not found.
    template <typename T>
    const T* GetNextOfObject() {
      return GetAsObject<T>(GetNextOfType<T>());
    }

    // Converts references to objects. This is a convenience method so that
    // users of the iterator don't need to also have their own pointer to the
    // allocator over which the iterator runs in order to retrieve objects.
    // Because the iterator is not read/write, only "const" objects can be
    // fetched. Non-const objects can be fetched using the reference on a
    // non-const (external) pointer to the same allocator (or use const_cast
    // to remove the qualifier).
    template <typename T>
    const T* GetAsObject(Reference ref) const {
      return allocator_->GetAsObject<T>(ref);
    }

    // Similar to GetAsObject() but converts references to arrays of things.
    template <typename T>
    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
      return allocator_->GetAsArray<T>(ref, type_id, count);
    }

    // Convert a generic pointer back into a reference. A null reference will
    // be returned if |memory| is not inside the persistent segment or does not
    // point to an object of the specified |type_id|.
    Reference GetAsReference(const void* memory, uint32_t type_id) const {
      return allocator_->GetAsReference(memory, type_id);
    }

    // As above but convert an object back into a reference.
    template <typename T>
    Reference GetAsReference(const T* obj) const {
      return allocator_->GetAsReference(obj);
    }

   private:
    // Weak-pointer to memory allocator being iterated over.
    const PersistentMemoryAllocator* allocator_;

    // The last record that was returned.
    std::atomic<Reference> last_record_;

    // The number of records found; used for detecting loops.
    std::atomic<uint32_t> record_count_;

    DISALLOW_COPY_AND_ASSIGN(Iterator);
  };

  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;
    size_t free;
  };

  enum : Reference {
    // A common "null" reference value.
    kReferenceNull = 0,

    // A value indicating that the type is in transition. Work is being done
    // on the contents to prepare it for a new type to come.
    kReferenceTransitioning = 0xFFFFFFFF,
  };

  enum : size_t {
    kSizeAny = 1  // Constant indicating that any array size is acceptable.
  };

  // This is the standard file extension (suitable for being passed to the
  // AddExtension() method of base::FilePath) for dumps of persistent memory.
  static const base::FilePath::CharType kFileExtension[];

  // The allocator operates on any arbitrary block of memory. Creation and
  // persisting or sharing of that block with another process is the
  // responsibility of the caller. The allocator needs to know only the
  // block's |base| address, the total |size| of the block, and any internal
  // |page| size (zero if not paged) across which allocations should not span.
  // The |id| is an arbitrary value the caller can use to identify a
  // particular memory segment. It will only be loaded during the initial
  // creation of the segment and can be checked by the caller for consistency.
  // The |name|, if provided, is used to distinguish histograms for this
  // allocator. Only the primary owner of the segment should define this value;
  // other processes can learn it from the shared state. If the underlying
  // memory is |readonly| then no changes will be made to it. The resulting
  // object should be stored as a "const" pointer.
  //
  // PersistentMemoryAllocator does NOT take ownership of the memory block.
  // The caller must manage it and ensure it stays available throughout the
  // lifetime of this object.
  //
  // Memory segments for sharing must have had an allocator attached to them
  // before actually being shared. If the memory segment was just created, it
  // should be zeroed before being passed here. If it was an existing segment,
  // the values here will be compared to copies stored in the shared segment
  // as a guard against corruption.
  //
  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
  // method below) before construction if the definition of the segment can
  // vary in any way at run-time. Invalid memory segments will cause a crash.
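  //
  // A construction sketch (the segment source, id, and name here are
  // hypothetical):
  //
  //   void* base = GetZeroedSegment(64 << 10);  // hypothetical 64 KiB block
  //   PersistentMemoryAllocator allocator(base, 64 << 10, /*page_size=*/0,
  //                                       /*id=*/0x1234ABCD, "MyAllocator",
  //                                       /*readonly=*/false);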
  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);
  virtual ~PersistentMemoryAllocator();

  // Check if a memory segment is acceptable for creation of an Allocator. This
  // doesn't do any analysis of the data and so doesn't guarantee that the
  // contents are valid, just that the parameters won't cause the program to
  // abort. The IsCorrupt() method will report detection of data problems
  // found during construction and general operation.
  static bool IsMemoryAcceptable(const void* data, size_t size,
                                 size_t page_size, bool readonly);

  // Get the internal identifier for this persistent memory segment.
  uint64_t Id() const;

  // Get the internal name of this allocator (possibly an empty string).
  const char* Name() const;

  // Is this segment open only for read?
  bool IsReadonly() { return readonly_; }

  // Create internal histograms for tracking memory use and allocation sizes
  // for allocator of |name| (which can simply be the result of Name()). This
  // is done separately from construction for situations such as when the
  // histograms will be backed by memory provided by this very allocator.
  //
  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
  // with the following histograms:
  //    UMA.PersistentAllocator.name.Allocs
  //    UMA.PersistentAllocator.name.UsedPct
  void CreateTrackingHistograms(base::StringPiece name);

  // Direct access to underlying memory segment. If the segment is shared
  // across threads or processes, reading data through these values does
  // not guarantee consistency. Use with care. Do not write.
  const void* data() const { return const_cast<const char*>(mem_base_); }
  size_t length() const { return mem_size_; }
  size_t size() const { return mem_size_; }
  size_t used() const;

  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
  // code and size-of(|T|) are compared to ensure the reference is valid
  // and cannot return an object outside of the memory segment. A |type_id| of
  // kTypeIdAny (zero) will match any type, though the size is still checked.
  // NULL is returned if any problem is detected, such as corrupted storage or
  // incorrect parameters. Callers MUST check that the returned value is
  // non-null EVERY TIME before accessing it or risk crashing! Once
  // dereferenced, the pointer is safe to reuse forever.
  //
  // It is essential that the object be of a fixed size. All fields must be of
  // a defined type that does not change based on the compiler or the CPU
  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
  // Unacceptable are int, bool, and wchar_t, which are implementation-defined
  // with regard to their size.
  //
  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
  // differently between 32 and 64 bit architectures. Either put the bigger
  // elements first, group smaller elements into blocks the size of larger
  // elements, or manually insert padding fields as appropriate for the
  // largest architecture, including at the end.
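  //
  // For example, a sketch of a layout that pads identically on 32-bit and
  // 64-bit builds:
  //
  //   struct GoodLayout {
  //     uint64_t big;      // Largest fields first...
  //     uint32_t small_a;  // ...then smaller fields grouped into blocks
  //     uint32_t small_b;  // the size of the larger ones...
  //     char bytes[8];     // ...and explicit sizing/padding at the end.
  //   };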
  //
  // To protect against mistakes, all objects must have the attribute
  // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
  // instance size is not fixed, at least one build will fail.
  //
  // If the size of a structure changes, the type-ID used to recognize it
  // should also change so later versions of the code don't try to read
  // incompatible structures from earlier versions.
  //
  // NOTE: Though this method will guarantee that an object of the specified
  // type can be accessed without going outside the bounds of the memory
  // segment, it makes no guarantees of the validity of the data within the
  // object itself. If it is expected that the contents of the segment could
  // be compromised with malicious intent, the object must be hardened as well.
  //
  // Though the persistent data may be "volatile" if it is shared with
  // other processes, such is not necessarily the case. The internal
  // "volatile" designation is discarded so as to not propagate the viral
  // nature of that keyword to the caller. The caller can add it back, if
  // necessary, based on knowledge of how the allocator is being used.
  template <typename T>
  T* GetAsObject(Reference ref) {
    static_assert(std::is_standard_layout<T>::value, "only standard objects");
    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
  }
  template <typename T>
  const T* GetAsObject(Reference ref) const {
    static_assert(std::is_standard_layout<T>::value, "only standard objects");
    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
  }

  // Like GetAsObject but get an array of simple, fixed-size types.
  //
  // Use a |count| of the required number of array elements, or kSizeAny.
  // GetAllocSize() can be used to calculate the upper bound but isn't reliable
  // because padding can make space for extra elements that were not written.
  //
  // Remember that an array of char is a string but may not be NUL terminated.
  //
  // There are no compile-time or run-time checks to ensure 32/64-bit size
  // compatibility when using these accessors. Only use fixed-size types such
  // as char, float, double, or (u)intXX_t.
  template <typename T>
  T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T))));
  }
  template <typename T>
  const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T))));
  }

  // Get the corresponding reference for an object held in persistent memory.
  // If the |memory| is not valid or the type does not match, a kReferenceNull
  // result will be returned.
  Reference GetAsReference(const void* memory, uint32_t type_id) const;

  // As above but works with objects allocated from persistent memory.
  template <typename T>
  Reference GetAsReference(const T* obj) const {
    return GetAsReference(obj, T::kPersistentTypeId);
  }

  // Get the number of bytes allocated to a block. This is useful when storing
  // arrays in order to validate the ending boundary. The returned value will
  // include any padding added to achieve the required alignment and so could
  // be larger than given in the original Allocate() request.
  size_t GetAllocSize(Reference ref) const;

  // Access the internal "type" of an object. This generally isn't necessary
  // but can be used to "clear" the type and so effectively mark it as deleted
  // even though the memory stays valid and allocated. Changing the type is
  // an atomic compare/exchange and so requires knowing the existing value.
  // It will return false if the existing type is not what is expected.
  // Changing the type doesn't mean the data is compatible with the new type.
  // It will likely be necessary to clear or reconstruct the type before it
  // can be used. Changing the type WILL NOT invalidate existing pointers to
  // the data, either in this process or others, so changing the data structure
  // could have unpredictable results. USE WITH CARE!
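  //
  // For example, a sketch of retiring an object by changing its type (the
  // "retired" id 0xDEADBEEF is an arbitrary value chosen by the caller):
  //
  //   if (allocator->ChangeType(ref, /*to_type_id=*/0xDEADBEEF,
  //                             MyPersistentObjectType::kPersistentTypeId)) {
  //     // Typed lookups with the old id will now return null.
  //   }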
  uint32_t GetType(Reference ref) const;
  bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);

  // Like ChangeType() but gets the "to" type from the object type, clears
  // the memory, and constructs a new object of the desired type just as
  // though it was fresh from AllocateObject<>(). The old type simply ceases
  // to exist; no destructor is called for it. Calling this will not invalidate
  // existing pointers to the object, either in this process or others, so
  // changing the object could have unpredictable results. USE WITH CARE!
  template <typename T>
  T* ChangeObject(Reference ref, uint32_t from_type_id) {
    DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
    // Make sure the memory is appropriate. This won't be used until after
    // the type is changed but checking first avoids the possibility of having
    // to change the type back.
    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
    if (!mem)
      return nullptr;
    // Ensure the allocator's internal alignment is sufficient for this object.
    // This protects against coding errors in the allocator.
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
    // First change the type to "transitioning" so that there is no race
    // condition with the clearing and construction of the object should
    // another thread be simultaneously iterating over data. This will
    // "acquire" the memory so no changes get reordered before it.
    if (!ChangeType(ref, kReferenceTransitioning, from_type_id))
      return nullptr;
    // Clear the memory so that the property of all memory being zero after an
    // allocation also applies here.
    memset(mem, 0, GetAllocSize(ref));
    // Construct an object of the desired type on this memory, just as if
    // AllocateObject had been called to create it.
    T* obj = new (mem) T();
    // Finally change the type to the desired one. This will "release" all of
    // the changes above and so provide a consistent view to other threads.
    bool success =
        ChangeType(ref, T::kPersistentTypeId, kReferenceTransitioning);
    DCHECK(success);
    return obj;
  }

  // Reserve space in the memory segment of the desired |size| and |type_id|.
  // A return value of zero indicates the allocation failed; otherwise the
  // returned reference can be used by any process to get a real pointer via
  // the GetAsObject() call.
  Reference Allocate(size_t size, uint32_t type_id);

  // Allocate and construct an object in persistent memory. The type must have
  // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
  // static constexpr fields that are used to ensure compatibility between
  // software versions. An optional size parameter can be specified to force
  // the allocation to be bigger than the size of the object; this is useful
  // when the last field is actually variable length.
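  //
  // A sketch of over-allocating for a trailing variable-length field (the
  // extra 100 bytes here are hypothetical):
  //
  //   MyPersistentObjectType* obj =
  //       allocator->AllocateObject<MyPersistentObjectType>(
  //           sizeof(MyPersistentObjectType) + 100);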
  template <typename T>
  T* AllocateObject(size_t size) {
    if (size < sizeof(T))
      size = sizeof(T);
    Reference ref = Allocate(size, T::kPersistentTypeId);
    void* mem =
        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
    if (!mem)
      return nullptr;
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
    return new (mem) T();
  }
  template <typename T>
  T* AllocateObject() {
    return AllocateObject<T>(sizeof(T));
  }

  // Deletes an object by destructing it and then changing the type to a
  // different value (default 0).
  template <typename T>
  void DeleteObject(T* obj, uint32_t new_type) {
    // Get the reference for the object.
    Reference ref = GetAsReference<T>(obj);
    // First change the type to "transitioning" so there is no race condition
    // where another thread could find the object through iteration while it
    // is being destructed. This will "acquire" the memory so no changes get
    // reordered before it. It will fail if |ref| is invalid.
    if (!ChangeType(ref, kReferenceTransitioning, T::kPersistentTypeId))
      return;
    // Destruct the object.
    obj->~T();
    // Finally change the type to the desired value. This will "release" all
    // the changes above.
    bool success = ChangeType(ref, new_type, kReferenceTransitioning);
    DCHECK(success);
  }
  template <typename T>
  void DeleteObject(T* obj) {
    DeleteObject<T>(obj, 0);
  }

  // Allocated objects can be added to an internal list that can then be
  // iterated over by other processes. If an allocated object can be found
  // another way, such as by having its reference within a different object
  // that will be made iterable, then this call is not necessary. This always
  // succeeds unless corruption is detected; check IsCorrupt() to find out.
  // Once an object is made iterable, its position in iteration can never
  // change; new iterable objects will always be added after it in the series.
  // Changing the type does not alter its "iterable" status.
  void MakeIterable(Reference ref);

  // As above but works with an object allocated from persistent memory.
  template <typename T>
  void MakeIterable(const T* obj) {
    MakeIterable(GetAsReference<T>(obj));
  }

  // Get the information about the amount of free space in the allocator. The
  // amount of free space should be treated as approximate due to extras from
  // alignment and metadata. Concurrent allocations from other threads will
  // also make the true amount less than what is reported.
  void GetMemoryInfo(MemoryInfo* meminfo) const;

  // If there is some indication that the memory has become corrupted,
  // calling this will attempt to prevent further damage by indicating to
  // all processes that something is not as expected.
  void SetCorrupt() const;

  // This can be called to determine if corruption has been detected in the
  // segment, possibly by a malicious actor. Once detected, future allocations
  // will fail and iteration may not locate all objects.
  bool IsCorrupt() const;

  // Flag set if an allocation has failed because the memory segment was full.
  bool IsFull() const;

  // Update those "tracking" histograms which do not get updates during regular
  // operation, such as how much memory is currently used. This should be
  // called before such information is to be displayed or uploaded.
  void UpdateTrackingHistograms();

 protected:
  enum MemoryType {
    MEM_EXTERNAL,
    MEM_MALLOC,
    MEM_VIRTUAL,
    MEM_SHARED,
    MEM_FILE,
  };

  struct Memory {
    Memory(void* b, MemoryType t) : base(b), type(t) {}

    void* base;
    MemoryType type;
  };

  // Constructs the allocator. Everything is the same as the public allocator
  // except |memory| which is a structure with additional information besides
  // the base address.
  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);

  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
  const MemoryType mem_type_;      // Type of memory allocation.
  const uint32_t mem_size_;        // Size of entire memory segment.
  const uint32_t mem_page_;        // Page size allocations shouldn't cross.

 private:
  struct SharedMetadata;
  struct BlockHeader;
  static const uint32_t kAllocAlignment;
  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code.
  const SharedMetadata* shared_meta() const {
    return reinterpret_cast<const SharedMetadata*>(
        const_cast<const char*>(mem_base_));
  }
  SharedMetadata* shared_meta() {
    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
  }

  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id);

  // Get the block header associated with a specific reference.
  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
                                       uint32_t size, bool queue_ok,
                                       bool free_ok) const;
  volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
                                 bool queue_ok, bool free_ok) {
    return const_cast<volatile BlockHeader*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
            ref, type_id, size, queue_ok, free_ok));
  }

  // Get the actual data within a block associated with a specific reference.
  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
                                    uint32_t size) const;
  volatile void* GetBlockData(Reference ref, uint32_t type_id,
                              uint32_t size) {
    return const_cast<volatile void*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
            ref, type_id, size));
  }

  const bool readonly_;        // Indicates access to read-only memory.
  std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.

  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
  HistogramBase* used_histogram_;    // Histogram recording used space.

  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
};


// This allocator uses a local memory block it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                 base::StringPiece name);
  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified |size|, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static Memory AllocateLocalMemory(size_t size);

  // Deallocates a block of local |memory| of the specified |size|.
  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);

  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};


// This allocator takes a shared-memory object and performs allocation from
// it. The memory must have been previously mapped via Map() or MapAt(). The
// allocator takes ownership of the memory object.
class BASE_EXPORT SharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
                                  uint64_t id,
                                  base::StringPiece name,
                                  bool read_only);
  ~SharedPersistentMemoryAllocator() override;

  SharedMemory* shared_memory() { return shared_memory_.get(); }

  // Ensure that the memory isn't so invalid that it would crash if passed to
  // the allocator. This doesn't guarantee the data is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);

 private:
  std::unique_ptr<SharedMemory> shared_memory_;

  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
};


#if !defined(OS_NACL)  // NaCl doesn't support any kind of file access in build.
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A |max_size| of zero will use the length of the file as the maximum
  // size. The |file| object must already have been created with sufficient
  // permissions (read, read/write, or read/write/extend).
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                base::StringPiece name,
                                bool read_only);
  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash if passed to
  // the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;

  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
#endif  // !defined(OS_NACL)

}  // namespace base

#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_