bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 1 | // Copyright (c) 2015 The Chromium Authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | #ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_ |
| 6 | #define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_ |
| 7 | |
| 8 | #include <stdint.h> |
dcheng | 093de9b | 2016-04-04 21:25:51 | [diff] [blame] | 9 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 10 | #include <atomic> |
dcheng | 093de9b | 2016-04-04 21:25:51 | [diff] [blame] | 11 | #include <memory> |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 12 | #include <type_traits> |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 13 | |
| 14 | #include "base/atomicops.h" |
| 15 | #include "base/base_export.h" |
bcwhite | 65e57d0 | 2016-05-13 14:39:40 | [diff] [blame] | 16 | #include "base/files/file_path.h" |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 17 | #include "base/gtest_prod_util.h" |
| 18 | #include "base/macros.h" |
bcwhite | 3779f98 | 2016-02-11 22:37:01 | [diff] [blame] | 19 | #include "base/strings/string_piece.h" |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 20 | |
| 21 | namespace base { |
| 22 | |
| 23 | class HistogramBase; |
| 24 | class MemoryMappedFile; |
bcwhite | 5451c58 | 2016-02-12 18:47:15 | [diff] [blame] | 25 | class SharedMemory; |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 26 | |
| 27 | // Simple allocator for pieces of a memory block that may be persistent |
| 28 | // to some storage or shared across multiple processes. This class resides |
| 29 | // under base/metrics because it was written for that purpose. It is, |
| 30 | // however, fully general-purpose and can be freely moved to base/memory |
| 31 | // if other uses are found. |
| 32 | // |
| 33 | // This class provides for thread-secure (i.e. safe against other threads |
| 34 | // or processes that may be compromised and thus have malicious intent) |
| 35 | // allocation of memory within a designated block and also a mechanism by |
| 36 | // which other threads can learn of these allocations. |
| 37 | // |
| 38 | // There is (currently) no way to release an allocated block of data because |
| 39 | // doing so would risk invalidating pointers held by other processes and |
| 40 | // greatly complicate the allocation algorithm. |
| 41 | // |
| 42 | // Construction of this object can accept new, clean (i.e. zeroed) memory |
| 43 | // or previously initialized memory. In the first case, construction must |
| 44 | // be allowed to complete before letting other allocators attach to the same |
| 45 | // segment. In other words, don't share the segment until at least one |
| 46 | // allocator has been attached to it. |
| 47 | // |
| 48 | // Note that memory not in active use is not accessed so it is possible to |
| 49 | // use virtual memory, including memory-mapped files, as backing storage with |
| 50 | // the OS "pinning" new (zeroed) physical RAM pages only as they are needed. |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 51 | // |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 52 | // OBJECTS: Although the allocator can be used in a "malloc" sense, fetching |
| 53 | // character arrays and manipulating that memory manually, the better way is |
| 54 | // generally to use the "Object" methods to create and manage allocations. In |
| 55 | // this way the sizing, type-checking, and construction are all automatic. For |
| 56 | // this to work, however, every type of stored object must define two public |
| 57 | // "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such: |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 58 | // |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 59 | // struct MyPersistentObjectType { |
| 60 | // // SHA1(MyPersistentObjectType): Increment this if structure changes! |
| 61 | // static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1; |
| 62 | // |
| 63 | // // Expected size for 32/64-bit check. Update this if structure changes! |
| 64 | // static constexpr size_t kExpectedInstanceSize = 20; |
| 65 | // |
| 66 | // ... |
| 67 | // }; |
| 68 | // |
| 69 | // kPersistentTypeId: This value is an arbitrary identifier that allows the |
| 70 | // identification of these objects in the allocator, including the ability |
| 71 | // to find them via iteration. The number is arbitrary but using the first |
| 72 | // four bytes of the SHA1 hash of the type name means that there shouldn't |
| 73 | // be any conflicts with other types that may also be stored in the memory. |
| 74 | // The fully qualified name (e.g. base::debug::MyPersistentObjectType) could |
| 75 | // be used to generate the hash if the type name seems common. Use a command |
| 76 | // like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum |
| 77 | // If the structure layout changes, ALWAYS increment this number so that |
| 78 | // newer versions of the code don't try to interpret persistent data written |
| 79 | // by older versions with a different layout. |
| 80 | // |
| 81 | // kExpectedInstanceSize: This value is the hard-coded number that matches |
| 82 | // what sizeof(T) would return. By providing it explicitly, the allocator can |
| 83 | // verify that the structure is compatible between both 32-bit and 64-bit |
| 84 | // versions of the code. |
| 85 | // |
| 86 | // Using AllocateObject (and ChangeObject) will zero the memory and then call |
| 87 | // the default constructor for the object. Given that objects are persistent, |
| 88 | // no destructor is ever called automatically though a caller can explicitly |
| 89 | // call DeleteObject to destruct it and change the type to something indicating |
| 90 | // it is no longer in use. |
| 91 | // |
// Though persistent memory segments are transferable between programs built
// for different natural word widths, they CANNOT be exchanged between CPUs
// of different endianness. Attempts to do so will simply see the existing data
| 95 | // as corrupt and refuse to access any of it. |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 96 | class BASE_EXPORT PersistentMemoryAllocator { |
| 97 | public: |
| 98 | typedef uint32_t Reference; |
| 99 | |
  // Iterator for going through all iterable memory records in an allocator.
  // Like the allocator itself, iterators are lock-free and thread-secure.
  // That means that multiple threads can share an iterator and the same
  // reference will not be returned twice.
  //
  // The order of the items returned by an iterator matches the order in which
  // MakeIterable() was called on them. Once an allocation is made iterable,
  // it is always such so the only possible difference between successive
  // iterations is for more to be added to the end.
  //
  // Iteration, in general, is tolerant of corrupted memory. It will return
  // what it can and stop only when corruption forces it to. Bad corruption
  // could cause the same object to be returned many times but it will
  // eventually quit.
  class BASE_EXPORT Iterator {
   public:
    // Constructs an iterator on a given |allocator|, starting at the
    // beginning. The allocator must live beyond the lifetime of the iterator.
    // This class has read-only access to the allocator (hence "const") but
    // the returned references can be used on a read/write version, too.
    explicit Iterator(const PersistentMemoryAllocator* allocator);

    // As above but resuming from the |starting_after| reference. The first
    // call to GetNext() will return the next object found after that
    // reference. The reference must be to an "iterable" object; references
    // to non-iterable objects (those that never had MakeIterable() called
    // for them) will cause a run-time error.
    Iterator(const PersistentMemoryAllocator* allocator,
             Reference starting_after);

    // Resets the iterator back to the beginning.
    void Reset();

    // Resets the iterator, resuming from the |starting_after| reference.
    void Reset(Reference starting_after);

    // Returns the previously retrieved reference, or kReferenceNull if none.
    // If constructed or reset with a |starting_after| location, this will
    // return that value.
    Reference GetLast();

    // Gets the next iterable, storing that type in |type_return|. The actual
    // return value is a reference to the allocation inside the allocator or
    // zero if there are no more. GetNext() may still be called again at a
    // later time to retrieve any new allocations that have been added.
    Reference GetNext(uint32_t* type_return);

    // Similar to above but gets the next iterable of a specific |type_match|.
    // This should not be mixed with calls to GetNext() because any
    // allocations skipped here due to a type mis-match will never be returned
    // by later calls to GetNext() meaning it's possible to completely miss
    // entries.
    Reference GetNextOfType(uint32_t type_match);

    // As above but the type to match is taken from T::kPersistentTypeId.
    template <typename T>
    Reference GetNextOfType() {
      return GetNextOfType(T::kPersistentTypeId);
    }

    // As above but returns the object itself; null if there are no more
    // objects of that type (or the reference could not be converted).
    template <typename T>
    const T* GetNextOfObject() {
      return GetAsObject<T>(GetNextOfType<T>());
    }

    // Converts references to objects. This is a convenience method so that
    // users of the iterator don't need to also have their own pointer to the
    // allocator over which the iterator runs in order to retrieve objects.
    // Because the iterator is not read/write, only "const" objects can be
    // fetched. Non-const objects can be fetched using the reference on a
    // non-const (external) pointer to the same allocator (or use const_cast
    // to remove the qualifier).
    template <typename T>
    const T* GetAsObject(Reference ref) const {
      return allocator_->GetAsObject<T>(ref);
    }

    // Similar to GetAsObject() but converts references to arrays of things.
    template <typename T>
    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
      return allocator_->GetAsArray<T>(ref, type_id, count);
    }

    // Convert a generic pointer back into a reference. A null reference will
    // be returned if |memory| is not inside the persistent segment or does
    // not point to an object of the specified |type_id|.
    Reference GetAsReference(const void* memory, uint32_t type_id) const {
      return allocator_->GetAsReference(memory, type_id);
    }

    // As above but convert an object back into a reference.
    template <typename T>
    Reference GetAsReference(const T* obj) const {
      return allocator_->GetAsReference(obj);
    }

   private:
    // Weak-pointer to memory allocator being iterated over.
    const PersistentMemoryAllocator* allocator_;

    // The last record that was returned.
    std::atomic<Reference> last_record_;

    // The number of records found; used for detecting loops.
    std::atomic<uint32_t> record_count_;

    DISALLOW_COPY_AND_ASSIGN(Iterator);
  };
| 208 | |
  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;  // Total space in the memory segment.
    size_t free;   // Remaining space; approximate (see GetMemoryInfo).
  };

  enum : Reference {
    // A common "null" reference value.
    kReferenceNull = 0,

    // A value indicating that the type is in transition. Work is being done
    // on the contents to prepare it for a new type to come.
    kReferenceTransitioning = 0xFFFFFFFF,
  };

  enum : size_t {
    kSizeAny = 1  // Constant indicating that any array size is acceptable.
  };
| 227 | |
bcwhite | 65e57d0 | 2016-05-13 14:39:40 | [diff] [blame] | 228 | // This is the standard file extension (suitable for being passed to the |
| 229 | // AddExtension() method of base::FilePath) for dumps of persistent memory. |
| 230 | static const base::FilePath::CharType kFileExtension[]; |
| 231 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 232 | // The allocator operates on any arbitrary block of memory. Creation and |
| 233 | // persisting or sharing of that block with another process is the |
| 234 | // responsibility of the caller. The allocator needs to know only the |
| 235 | // block's |base| address, the total |size| of the block, and any internal |
| 236 | // |page| size (zero if not paged) across which allocations should not span. |
| 237 | // The |id| is an arbitrary value the caller can use to identify a |
| 238 | // particular memory segment. It will only be loaded during the initial |
| 239 | // creation of the segment and can be checked by the caller for consistency. |
| 240 | // The |name|, if provided, is used to distinguish histograms for this |
| 241 | // allocator. Only the primary owner of the segment should define this value; |
| 242 | // other processes can learn it from the shared state. If the underlying |
| 243 | // memory is |readonly| then no changes will be made to it. The resulting |
| 244 | // object should be stored as a "const" pointer. |
| 245 | // |
| 246 | // PersistentMemoryAllocator does NOT take ownership of the memory block. |
| 247 | // The caller must manage it and ensure it stays available throughout the |
| 248 | // lifetime of this object. |
| 249 | // |
| 250 | // Memory segments for sharing must have had an allocator attached to them |
| 251 | // before actually being shared. If the memory segment was just created, it |
| 252 | // should be zeroed before being passed here. If it was an existing segment, |
| 253 | // the values here will be compared to copies stored in the shared segment |
| 254 | // as a guard against corruption. |
| 255 | // |
| 256 | // Make sure that the memory segment is acceptable (see IsMemoryAcceptable() |
| 257 | // method below) before construction if the definition of the segment can |
| 258 | // vary in any way at run-time. Invalid memory segments will cause a crash. |
| 259 | PersistentMemoryAllocator(void* base, size_t size, size_t page_size, |
bcwhite | 3779f98 | 2016-02-11 22:37:01 | [diff] [blame] | 260 | uint64_t id, base::StringPiece name, |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 261 | bool readonly); |
| 262 | virtual ~PersistentMemoryAllocator(); |
| 263 | |
| 264 | // Check if memory segment is acceptable for creation of an Allocator. This |
| 265 | // doesn't do any analysis of the data and so doesn't guarantee that the |
// contents are valid, just that the parameters won't cause the program to
| 267 | // abort. The IsCorrupt() method will report detection of data problems |
| 268 | // found during construction and general operation. |
| 269 | static bool IsMemoryAcceptable(const void* data, size_t size, |
| 270 | size_t page_size, bool readonly); |
| 271 | |
| 272 | // Get the internal identifier for this persistent memory segment. |
| 273 | uint64_t Id() const; |
| 274 | |
| 275 | // Get the internal name of this allocator (possibly an empty string). |
| 276 | const char* Name() const; |
| 277 | |
| 278 | // Is this segment open only for read? |
| 279 | bool IsReadonly() { return readonly_; } |
| 280 | |
| 281 | // Create internal histograms for tracking memory use and allocation sizes |
| 282 | // for allocator of |name| (which can simply be the result of Name()). This |
// is done separately from construction for situations such as when the
| 284 | // histograms will be backed by memory provided by this very allocator. |
bcwhite | 3779f98 | 2016-02-11 22:37:01 | [diff] [blame] | 285 | // |
| 286 | // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml |
| 287 | // with the following histograms: |
| 288 | // UMA.PersistentAllocator.name.Allocs |
| 289 | // UMA.PersistentAllocator.name.UsedPct |
| 290 | void CreateTrackingHistograms(base::StringPiece name); |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 291 | |
| 292 | // Direct access to underlying memory segment. If the segment is shared |
| 293 | // across threads or processes, reading data through these values does |
| 294 | // not guarantee consistency. Use with care. Do not write. |
| 295 | const void* data() const { return const_cast<const char*>(mem_base_); } |
| 296 | size_t length() const { return mem_size_; } |
bcwhite | 65e57d0 | 2016-05-13 14:39:40 | [diff] [blame] | 297 | size_t size() const { return mem_size_; } |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 298 | size_t used() const; |
| 299 | |
| 300 | // Get an object referenced by a |ref|. For safety reasons, the |type_id| |
| 301 | // code and size-of(|T|) are compared to ensure the reference is valid |
| 302 | // and cannot return an object outside of the memory segment. A |type_id| of |
| 303 | // kTypeIdAny (zero) will match any though the size is still checked. NULL is |
| 304 | // returned if any problem is detected, such as corrupted storage or incorrect |
| 305 | // parameters. Callers MUST check that the returned value is not-null EVERY |
| 306 | // TIME before accessing it or risk crashing! Once dereferenced, the pointer |
| 307 | // is safe to reuse forever. |
| 308 | // |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 309 | // It is essential that the object be of a fixed size. All fields must be of |
| 310 | // a defined type that does not change based on the compiler or the CPU |
| 311 | // natural word size. Acceptable are char, float, double, and (u)intXX_t. |
| 312 | // Unacceptable are int, bool, and wchar_t which are implementation defined |
| 313 | // with regards to their size. |
bcwhite | e99be2d | 2016-11-09 19:39:41 | [diff] [blame] | 314 | // |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 315 | // Alignment must also be consistent. A uint64_t after a uint32_t will pad |
bcwhite | e99be2d | 2016-11-09 19:39:41 | [diff] [blame] | 316 | // differently between 32 and 64 bit architectures. Either put the bigger |
| 317 | // elements first, group smaller elements into blocks the size of larger |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 318 | // elements, or manually insert padding fields as appropriate for the |
| 319 | // largest architecture, including at the end. |
| 320 | // |
// To protect against mistakes, all objects must have the attribute
| 322 | // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded |
| 323 | // numerical value -- NNN, not sizeof(T) -- that can be tested. If the |
| 324 | // instance size is not fixed, at least one build will fail. |
| 325 | // |
| 326 | // If the size of a structure changes, the type-ID used to recognize it |
| 327 | // should also change so later versions of the code don't try to read |
| 328 | // incompatible structures from earlier versions. |
bcwhite | e99be2d | 2016-11-09 19:39:41 | [diff] [blame] | 329 | // |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 330 | // NOTE: Though this method will guarantee that an object of the specified |
| 331 | // type can be accessed without going outside the bounds of the memory |
| 332 | // segment, it makes no guarantees of the validity of the data within the |
| 333 | // object itself. If it is expected that the contents of the segment could |
| 334 | // be compromised with malicious intent, the object must be hardened as well. |
| 335 | // |
| 336 | // Though the persistent data may be "volatile" if it is shared with |
| 337 | // other processes, such is not necessarily the case. The internal |
| 338 | // "volatile" designation is discarded so as to not propagate the viral |
| 339 | // nature of that keyword to the caller. It can add it back, if necessary, |
| 340 | // based on knowledge of how the allocator is being used. |
| 341 | template <typename T> |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 342 | T* GetAsObject(Reference ref) { |
| 343 | static_assert(std::is_standard_layout<T>::value, "only standard objects"); |
| 344 | static_assert(!std::is_array<T>::value, "use GetAsArray<>()"); |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 345 | static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size"); |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 346 | return const_cast<T*>(reinterpret_cast<volatile T*>( |
| 347 | GetBlockData(ref, T::kPersistentTypeId, sizeof(T)))); |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 348 | } |
| 349 | template <typename T> |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 350 | const T* GetAsObject(Reference ref) const { |
| 351 | static_assert(std::is_standard_layout<T>::value, "only standard objects"); |
| 352 | static_assert(!std::is_array<T>::value, "use GetAsArray<>()"); |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 353 | static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size"); |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 354 | return const_cast<const T*>(reinterpret_cast<const volatile T*>( |
| 355 | GetBlockData(ref, T::kPersistentTypeId, sizeof(T)))); |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 356 | } |
| 357 | |
piman | 03cd21b | 2016-11-22 21:03:29 | [diff] [blame] | 358 | // Like GetAsObject but get an array of simple, fixed-size types. |
| 359 | // |
| 360 | // Use a |count| of the required number of array elements, or kSizeAny. |
| 361 | // GetAllocSize() can be used to calculate the upper bound but isn't reliable |
| 362 | // because padding can make space for extra elements that were not written. |
| 363 | // |
| 364 | // Remember that an array of char is a string but may not be NUL terminated. |
| 365 | // |
| 366 | // There are no compile-time or run-time checks to ensure 32/64-bit size |
| 367 | // compatibilty when using these accessors. Only use fixed-size types such |
| 368 | // as char, float, double, or (u)intXX_t. |
| 369 | template <typename T> |
| 370 | T* GetAsArray(Reference ref, uint32_t type_id, size_t count) { |
| 371 | static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()"); |
| 372 | return const_cast<T*>(reinterpret_cast<volatile T*>( |
| 373 | GetBlockData(ref, type_id, count * sizeof(T)))); |
| 374 | } |
| 375 | template <typename T> |
| 376 | const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const { |
| 377 | static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()"); |
| 378 | return const_cast<const char*>(reinterpret_cast<const volatile T*>( |
| 379 | GetBlockData(ref, type_id, count * sizeof(T)))); |
| 380 | } |
| 381 | |
bcwhite | 1010504 | 2016-11-29 00:09:55 | [diff] [blame] | 382 | // Get the corresponding reference for an object held in persistent memory. |
| 383 | // If the |memory| is not valid or the type does not match, a kReferenceNull |
| 384 | // result will be returned. |
| 385 | Reference GetAsReference(const void* memory, uint32_t type_id) const; |
| 386 | |
  // As above but works with objects allocated from persistent memory; the
  // type-ID to match is taken from T::kPersistentTypeId.
  template <typename T>
  Reference GetAsReference(const T* obj) const {
    return GetAsReference(obj, T::kPersistentTypeId);
  }
| 392 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 393 | // Get the number of bytes allocated to a block. This is useful when storing |
| 394 | // arrays in order to validate the ending boundary. The returned value will |
| 395 | // include any padding added to achieve the required alignment and so could |
| 396 | // be larger than given in the original Allocate() request. |
| 397 | size_t GetAllocSize(Reference ref) const; |
| 398 | |
| 399 | // Access the internal "type" of an object. This generally isn't necessary |
| 400 | // but can be used to "clear" the type and so effectively mark it as deleted |
bcwhite | 84a8e09 | 2016-06-03 15:19:31 | [diff] [blame] | 401 | // even though the memory stays valid and allocated. Changing the type is |
| 402 | // an atomic compare/exchange and so requires knowing the existing value. |
| 403 | // It will return false if the existing type is not what is expected. |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 404 | // Changing the type doesn't mean the data is compatible with the new type. |
| 405 | // It will likely be necessary to clear or reconstruct the type before it |
| 406 | // can be used. Changing the type WILL NOT invalidate existing pointers to |
| 407 | // the data, either in this process or others, so changing the data structure |
// could have unpredictable results. USE WITH CARE!
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 409 | uint32_t GetType(Reference ref) const; |
bcwhite | 84a8e09 | 2016-06-03 15:19:31 | [diff] [blame] | 410 | bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id); |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 411 | |
  // Like ChangeType() but gets the "to" type from the object type, clears
  // the memory, and constructs a new object of the desired type just as
  // though it was fresh from AllocateObject<>(). The old type simply ceases
  // to exist; no destructor is called for it. Calling this will not
  // invalidate existing pointers to the object, either in this process or
  // others, so changing the object could have unpredictable results. USE
  // WITH CARE! T must be default-constructible (see the placement-new below).
  template <typename T>
  T* ChangeObject(Reference ref, uint32_t from_type_id) {
    DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
    // Make sure the memory is appropriate. This won't be used until after
    // the type is changed but checking first avoids the possibility of having
    // to change the type back. Passing type-ID 0 skips the type check here;
    // the real type match is enforced by ChangeType() below.
    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
    if (!mem)
      return nullptr;
    // Ensure the allocator's internal alignment is sufficient for this object.
    // This protects against coding errors in the allocator.
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
    // First change the type to "transitioning" so that there is no race
    // condition with the clearing and construction of the object should
    // another thread be simultaneously iterating over data. This will
    // "acquire" the memory so no changes get reordered before it.
    if (!ChangeType(ref, kReferenceTransitioning, from_type_id))
      return nullptr;
    // Clear the memory so that the property of all memory being zero after an
    // allocation also applies here.
    memset(mem, 0, GetAllocSize(ref));
    // Construct an object of the desired type on this memory, just as if
    // AllocateObject had been called to create it.
    T* obj = new (mem) T();
    // Finally change the type to the desired one. This will "release" all of
    // the changes above and so provide a consistent view to other threads.
    bool success =
        ChangeType(ref, T::kPersistentTypeId, kReferenceTransitioning);
    DCHECK(success);
    return obj;
  }
| 449 | |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 450 | // Reserve space in the memory segment of the desired |size| and |type_id|. |
| 451 | // A return value of zero indicates the allocation failed, otherwise the |
| 452 | // returned reference can be used by any process to get a real pointer via |
| 453 | // the GetAsObject() call. |
| 454 | Reference Allocate(size_t size, uint32_t type_id); |
| 455 | |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 456 | // Allocate and construct an object in persistent memory. The type must have |
| 457 | // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId |
| 458 | // static constexpr fields that are used to ensure compatibility between |
| 459 | // software versions. An optional size parameter can be specified to force |
| 460 | // the allocation to be bigger than the size of the object; this is useful |
| 461 | // when the last field is actually variable length. |
| 462 | template <typename T> |
| 463 | T* AllocateObject(size_t size) { |
| 464 | if (size < sizeof(T)) |
| 465 | size = sizeof(T); |
| 466 | Reference ref = Allocate(size, T::kPersistentTypeId); |
| 467 | void* mem = |
| 468 | const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size)); |
| 469 | if (!mem) |
| 470 | return nullptr; |
| 471 | DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1)); |
| 472 | return new (mem) T(); |
| 473 | } |
| 474 | template <typename T> |
| 475 | T* AllocateObject() { |
| 476 | return AllocateObject<T>(sizeof(T)); |
| 477 | } |
| 478 | |
  // Deletes an object by destructing it and then changing the type to a
  // different value (default 0). The memory itself is never released; see
  // the class comment about why allocations cannot be freed.
  template <typename T>
  void DeleteObject(T* obj, uint32_t new_type) {
    // Get the reference for the object.
    Reference ref = GetAsReference<T>(obj);
    // First change the type to "transitioning" so there is no race condition
    // where another thread could find the object through iteration while it
    // is being destructed. This will "acquire" the memory so no changes get
    // reordered before it. It will fail if |ref| is invalid.
    if (!ChangeType(ref, kReferenceTransitioning, T::kPersistentTypeId))
      return;
    // Destruct the object.
    obj->~T();
    // Finally change the type to the desired value. This will "release" all
    // the changes above.
    bool success = ChangeType(ref, new_type, kReferenceTransitioning);
    DCHECK(success);
  }
  template <typename T>
  void DeleteObject(T* obj) {
    DeleteObject<T>(obj, 0);
  }
| 502 | |
  // Allocated objects can be added to an internal list that can then be
  // iterated over by other processes. If an allocated object can be found
  // another way, such as by having its reference within a different object
  // that will be made iterable, then this call is not necessary. This always
  // succeeds unless corruption is detected; check IsCorrupted() to find out.
  // Once an object is made iterable, its position in iteration can never
  // change; new iterable objects will always be added after it in the series.
  // Changing the type does not alter its "iterable" status.
  void MakeIterable(Reference ref);
| 512 | |
bcwhite | 3f999d3 | 2017-01-11 12:42:13 | [diff] [blame^] | 513 | // As above but works with an object allocated from persistent memory. |
| 514 | template <typename T> |
| 515 | void MakeIterable(const T* obj) { |
| 516 | MakeIterable(GetAsReference<T>(obj)); |
| 517 | } |
| 518 | |
  // Get the information about the amount of free space in the allocator. The
  // amount of free space should be treated as approximate due to extras from
  // alignment and metadata. Concurrent allocations from other threads will
  // also make the true amount less than what is reported. |meminfo| must be
  // a valid pointer; the result is written into it.
  void GetMemoryInfo(MemoryInfo* meminfo) const;
| 524 | |
  // If there is some indication that the memory has become corrupted,
  // calling this will attempt to prevent further damage by indicating to
  // all processes that something is not as expected.
  void SetCorrupt() const;

  // This can be called to determine if corruption has been detected in the
  // segment, possibly by a malicious actor. Once detected, future allocations
  // will fail and iteration may not locate all objects.
  bool IsCorrupt() const;

  // Flag set if an allocation has failed because the memory segment was full.
  bool IsFull() const;

  // Update those "tracking" histograms which do not get updates during regular
  // operation, such as how much memory is currently used. This should be
  // called before such information is to be displayed or uploaded.
  void UpdateTrackingHistograms();
| 542 | |
 protected:
  // Distinguishes how the underlying memory was obtained so the owning
  // subclass can release it appropriately on destruction.
  enum MemoryType {
    MEM_EXTERNAL,
    MEM_MALLOC,
    MEM_VIRTUAL,
    MEM_SHARED,
    MEM_FILE,
  };

  // Bundles the base address of a memory segment together with how that
  // memory was obtained.
  struct Memory {
    Memory(void* b, MemoryType t) : base(b), type(t) {}

    void* base;       // Base address of the memory segment.
    MemoryType type;  // How the memory at |base| was obtained.
  };

  // Constructs the allocator. Everything is the same as the public allocator
  // except |memory| which is a structure with additional information besides
  // the base address.
  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);
| 565 | |
  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
  const MemoryType mem_type_;      // Type of memory allocation.
  const uint32_t mem_size_;        // Size of entire memory segment.
  const uint32_t mem_page_;        // Page size allocations shouldn't cross.

 private:
  // Internal structures; full definitions live in the implementation file.
  struct SharedMetadata;
  struct BlockHeader;
  static const uint32_t kAllocAlignment;  // Alignment of all allocations.
  static const Reference kReferenceQueue;  // Reference of the iteration queue.
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 576 | |
| 577 | // The shared metadata is always located at the top of the memory segment. |
| 578 | // These convenience functions eliminate constant casting of the base |
| 579 | // pointer within the code. |
bcwhite | c03fc0a | 2016-02-05 01:18:03 | [diff] [blame] | 580 | const SharedMetadata* shared_meta() const { |
| 581 | return reinterpret_cast<const SharedMetadata*>( |
| 582 | const_cast<const char*>(mem_base_)); |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 583 | } |
bcwhite | c03fc0a | 2016-02-05 01:18:03 | [diff] [blame] | 584 | SharedMetadata* shared_meta() { |
| 585 | return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_)); |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 586 | } |
| 587 | |
  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id);

  // Get the block header associated with a specific reference. Returns null
  // if the block does not validate against |type_id| and |size|.
  // NOTE(review): |queue_ok|/|free_ok| presumably permit matching the queue
  // block or free blocks respectively — confirm against the implementation.
  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
                                       uint32_t size, bool queue_ok,
                                       bool free_ok) const;
| 595 | volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size, |
| 596 | bool queue_ok, bool free_ok) { |
| 597 | return const_cast<volatile BlockHeader*>( |
| 598 | const_cast<const PersistentMemoryAllocator*>(this)->GetBlock( |
| 599 | ref, type_id, size, queue_ok, free_ok)); |
| 600 | } |
| 601 | |
  // Get the actual data within a block associated with a specific reference.
  // Returns null if the block does not validate against |type_id| and |size|.
  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
                                    uint32_t size) const;
| 605 | volatile void* GetBlockData(Reference ref, uint32_t type_id, |
| 606 | uint32_t size) { |
| 607 | return const_cast<volatile void*>( |
| 608 | const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData( |
| 609 | ref, type_id, size)); |
| 610 | } |
| 611 | |
  const bool readonly_;        // Indicates access to read-only memory.
  std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.

  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
  HistogramBase* used_histogram_;    // Histogram recording used space.

  // Test access to internals.
  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
| 621 | }; |
| 622 | |
| 623 | |
// This allocator uses a local memory block allocated from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                 base::StringPiece name);
  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified |size|, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static Memory AllocateLocalMemory(size_t size);

  // Deallocates a block of local |memory| of the specified |size|. The |type|
  // indicates how the memory was obtained and thus how to release it.
  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);

  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};
| 646 | |
| 647 | |
// This allocator takes a shared-memory object and performs allocation from
// it. The memory must be previously mapped via Map() or MapAt(). The allocator
// takes ownership of the memory object.
class BASE_EXPORT SharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
                                  uint64_t id,
                                  base::StringPiece name,
                                  bool read_only);
  ~SharedPersistentMemoryAllocator() override;

  // Returns the underlying shared-memory object; ownership is retained by
  // this allocator.
  SharedMemory* shared_memory() { return shared_memory_.get(); }

  // Ensure that the memory isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the data is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);

 private:
  std::unique_ptr<SharedMemory> shared_memory_;

  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
};
| 673 | |
| 674 | |
bcwhite | 9e68eef | 2016-05-30 15:40:30 | [diff] [blame] | 675 | #if !defined(OS_NACL) // NACL doesn't support any kind of file access in build. |
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A |max_size| of zero will use the length of the file as the maximum
  // size. The |file| object must have been already created with sufficient
  // permissions (read, read/write, or read/write/extend).
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                base::StringPiece name,
                                bool read_only);
  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;

  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
bcwhite | 9e68eef | 2016-05-30 15:40:30 | [diff] [blame] | 702 | #endif // !defined(OS_NACL) |
bcwhite | 34ae498 | 2016-01-20 13:44:46 | [diff] [blame] | 703 | |
| 704 | } // namespace base |
| 705 | |
| 706 | #endif // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_ |