// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_ALLOCATION_TRACE_H_
#define BASE_DEBUG_ALLOCATION_TRACE_H_

#include <algorithm>
#include <array>
#include <atomic>
#include <bit>
#include <cstdint>

#include "base/allocator/dispatcher/notification_data.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/debug/debugging_buildflags.h"
#include "base/debug/stack_trace.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "build/build_config.h"

namespace base::debug::tracer {

// Number of traces that can be stored. This number must be a power of two to
// allow for fast computation of modulo.
constexpr size_t kMaximumNumberOfMemoryOperationTraces = (1 << 15);
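// Illustrative note: because the size is a power of two, the wrap-around
//   idx % kMaximumNumberOfMemoryOperationTraces
// can be lowered by the compiler to a single bitwise AND, e.g.
//   idx & (kMaximumNumberOfMemoryOperationTraces - 1)  // == idx & 0x7fff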
// Number of frames stored for each operation. The lowest frames most likely
// belong to the memory allocation system itself. Hence, we store more frames
// to increase the chances of getting a meaningful trace of the path that
// caused the allocation or free.
constexpr size_t kStackTraceSize = 16;

// The type of an operation stored in the recorder.
enum class OperationType {
  // The state of an operation record before calling any of the initialization
  // functions.
  kNone = 0,
  // The record represents an allocation operation.
  kAllocation,
  // The record represents a free operation.
  kFree,
};

using StackTraceContainer = std::array<const void*, kStackTraceSize>;

// The record for a single operation. A record can represent either an
// allocation or a free operation, but not both at the same time.
//
// A record protects itself from concurrent initialization. If a thread B calls
// any of the Initialize*-functions while another thread A is currently
// initializing, B's invocation immediately returns |false| without
// interfering with thread A.
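//
// A minimal caller-side sketch of this contract (hypothetical names, not part
// of the interface):
//   OperationRecord record;
//   if (!record.InitializeAllocation(allocated_address, allocated_size)) {
//     // Another thread is writing this record right now; callers are
//     // expected to retry with a different record instead of blocking.
//   }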
class BASE_EXPORT OperationRecord {
 public:
  constexpr OperationRecord() = default;

  OperationRecord(const OperationRecord&) = delete;
  OperationRecord& operator=(const OperationRecord&) = delete;

  // Is the record currently being taken?
  bool IsRecording() const;

  OperationType GetOperationType() const;
  // The address allocated or freed.
  const void* GetAddress() const;
  // Number of allocated bytes. Returns 0 for free operations.
  size_t GetSize() const;
  // The stacktrace as taken by the Initialize*-functions.
  const StackTraceContainer& GetStackTrace() const LIFETIME_BOUND;

  // Initialize the record with data for another operation. Data from any
  // previous operation will be silently overwritten. These functions are
  // declared ALWAYS_INLINE to minimize pollution of the recorded stack trace.
  //
  // Both functions return false in case no record was taken, i.e. if another
  // thread is capturing.
  ALWAYS_INLINE bool InitializeFree(const void* freed_address) {
    return InitializeOperationRecord(freed_address, 0, OperationType::kFree);
  }

  ALWAYS_INLINE bool InitializeAllocation(const void* allocated_address,
                                          size_t allocated_size) {
    return InitializeOperationRecord(allocated_address, allocated_size,
                                     OperationType::kAllocation);
  }

 private:
  // Initialize a record with the given data. Return true if the record was
  // initialized successfully, false if no record was taken, i.e. if another
  // thread is capturing.
  ALWAYS_INLINE bool InitializeOperationRecord(const void* address,
                                               size_t size,
                                               OperationType operation_type);
  ALWAYS_INLINE void StoreStackTrace();

  // The stack trace taken in one of the Initialize* functions.
  StackTraceContainer stack_trace_ = {};
  // The number of allocated bytes.
  size_t size_ = 0;
  // The address that was allocated or freed.
  // We use a raw C++ pointer instead of base::raw_ptr for performance reasons:
  // - In the recorder we only store pointers; we never allocate or free
  //   anything ourselves.
  // - Storing is the hot path. base::raw_ptr::operator= may perform sanity
  //   checks which do not make sense in our case (otherwise the allocated
  //   address would have been quirky).
  RAW_PTR_EXCLUSION const void* address_ = nullptr;
  // The type of the operation that was performed. In the course of making a
  // record, this value is reset to |OperationType::kNone| and only later set
  // to the operation-type-specific value, so if the process crashes whilst
  // writing the record, it is marked as empty. To prevent the compiler from
  // optimizing away the initial reset, this value is marked as volatile.
  volatile OperationType operation_type_ = OperationType::kNone;
  // Is the record currently being taken by another thread? Used to prevent
  // concurrent writes to the same record.
  //
  // The value is mutable since, prior to C++20, atomic_flag has no const
  // getter; all ways of reading the value involve setting it.
  // TODO(crbug.com/42050406): Remove mutable and make IsRecording() use
  // atomic_flag::test();
  mutable std::atomic_flag is_recording_ = ATOMIC_FLAG_INIT;
};

ALWAYS_INLINE bool OperationRecord::InitializeOperationRecord(
    const void* address,
    size_t size,
    OperationType operation_type) {
  if (is_recording_.test_and_set(std::memory_order_acquire)) {
    return false;
  }

  operation_type_ = operation_type;
  StoreStackTrace();
  address_ = address;
  size_ = size;

  is_recording_.clear(std::memory_order_release);

  return true;
}

ALWAYS_INLINE void OperationRecord::StoreStackTrace() {
  stack_trace_.fill(nullptr);

#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
  // Currently we limit ourselves to TraceStackFramePointers, which is known to
  // have an acceptable performance impact on Android.
  base::debug::TraceStackFramePointers(stack_trace_, 0);
#elif BUILDFLAG(IS_LINUX)
  // Use base::debug::CollectStackTrace as an alternative for tests on Linux.
  // We still have a check in /base/debug/debug.gni to prevent
  // AllocationStackTraceRecorder from being enabled accidentally on Linux.
  base::debug::CollectStackTrace(stack_trace_);
#else
#error "No supported stack tracer found."
#endif
}

struct BASE_EXPORT AllocationTraceRecorderStatistics {
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
  AllocationTraceRecorderStatistics(size_t total_number_of_allocations,
                                    size_t total_number_of_collisions);
#else
  AllocationTraceRecorderStatistics(size_t total_number_of_allocations);
#endif

  // The total number of allocations that have been recorded.
  size_t total_number_of_allocations;
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
  // The total number of collisions that have been encountered. A collision
  // happens when two threads concurrently try to record using the same slot.
  size_t total_number_of_collisions;
#endif
};

// The recorder which holds entries for past memory operations.
//
// The memory image of the recorder will be copied into the crash-handler.
// Therefore, it must not hold any references to external data which are vital
// for proper functioning.
//
// It is important that the recorder itself does not allocate, both to prevent
// recursive calls and to keep the runtime overhead as low as possible.
//
// Therefore, records are stored in a preallocated buffer with a compile-time
// constant maximum size, see |kMaximumNumberOfMemoryOperationTraces|. Once all
// records have been used, the oldest records are overwritten (FIFO-style).
//
// The recorder works in a multithreaded environment without external locking.
// Concurrent writes are prevented by two means:
// 1 - We atomically increment and calculate the effective index of the record
//     to be written.
// 2 - If this entry is still being used (the recording thread didn't finish
//     yet), we go back to step 1.
// Currently we do not enforce separate cache lines for each entry, which means
// false sharing can occur. On the other hand, with 64-byte cache lines a clean
// separation would introduce some 3*64 - sizeof(OperationRecord) = 40 bytes of
// padding per entry.
//
// Note: As a process might be terminated for whatever reason while stack
// traces are being written, the recorded data may contain some garbage.
//
// TODO(crbug.com/40258550): Evaluate the impact of the shared cache
// lines between entries.
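//
// A minimal reader-side sketch (illustrative only; it assumes that, as noted
// below, no further allocations or frees are being recorded while reading):
//   const AllocationTraceRecorder& recorder = ...;
//   for (size_t i = 0; i < recorder.size(); ++i) {
//     const OperationRecord& record = recorder[i];
//     if (record.GetOperationType() == OperationType::kAllocation) {
//       // Inspect record.GetAddress(), record.GetSize(),
//       // record.GetStackTrace().
//     }
//   }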
class BASE_EXPORT AllocationTraceRecorder {
 public:
  constexpr AllocationTraceRecorder() = default;

  AllocationTraceRecorder(const AllocationTraceRecorder&) = delete;
  AllocationTraceRecorder& operator=(const AllocationTraceRecorder&) = delete;

  // The allocation event observer interface. See the dispatcher for further
  // details. The actual handlers (the private overloads below) are marked
  // NOINLINE, while all other functions they call, except the one taking the
  // call stack, are marked ALWAYS_INLINE. This way we ensure that the number
  // of frames recorded from these functions is fixed.
  inline void OnAllocation(
      const base::allocator::dispatcher::AllocationNotificationData&
          allocation_data);

  // Handle all free events.
  inline void OnFree(
      const base::allocator::dispatcher::FreeNotificationData& free_data);
André Kempeddbe3072023-03-03 18:44:38224
225 // Access functions to retrieve the current content of the recorder.
226 // Note: Since the recorder is usually updated upon each allocation or free,
227 // it is important to prevent updates if you want to read the entries at any
228 // point.
229
230 // Get the current number of entries stored in the recorder. When the
231 // recorder has reached its maximum capacity, it always returns
232 // |GetMaximumNumberOfTraces()|.
233 size_t size() const;
234
235 // Access the record of an operation by index. Oldest operation is always
236 // accessible at index 0, latest operation at |size()-1|.
237 // Note: Since a process might have crashed while a trace is being written,
238 // especially the last records might be corrupted.
239 const OperationRecord& operator[](size_t idx) const;

  constexpr size_t GetMaximumNumberOfTraces() const {
    return kMaximumNumberOfMemoryOperationTraces;
  }

  AllocationTraceRecorderStatistics GetRecorderStatistics() const;

 private:
  // Handle all allocation events.
  NOINLINE void OnAllocation(const void* allocated_address,
                             size_t allocated_size);

  // Handle all free events.
  NOINLINE void OnFree(const void* freed_address);

  ALWAYS_INLINE size_t GetNextIndex();

  ALWAYS_INLINE static constexpr size_t WrapIdxIfNeeded(size_t idx);

  // The actual container.
  std::array<OperationRecord, kMaximumNumberOfMemoryOperationTraces>
      alloc_trace_buffer_ = {};
  // The total number of records that have been taken so far. Note that this
  // might be greater than |kMaximumNumberOfMemoryOperationTraces| since we
  // overwrite oldest items.
  std::atomic<size_t> total_number_of_records_ = 0;
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
  std::atomic<size_t> total_number_of_collisions_ = 0;
#endif
};

inline void AllocationTraceRecorder::OnAllocation(
    const base::allocator::dispatcher::AllocationNotificationData&
        allocation_data) {
  OnAllocation(allocation_data.address(), allocation_data.size());
}

// Handle all free events.
inline void AllocationTraceRecorder::OnFree(
    const base::allocator::dispatcher::FreeNotificationData& free_data) {
  OnFree(free_data.address());
}
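
// Hypothetical wiring sketch: the recorder is intended to be registered as an
// observer with the allocation dispatcher so that OnAllocation()/OnFree()
// receive the hooked events. The registration call below is an assumption for
// illustration only; see base/allocator/dispatcher/dispatcher.h for the real
// API.
//   AllocationTraceRecorder recorder;
//   base::allocator::dispatcher::Dispatcher::GetInstance().Initialize(
//       &recorder);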

ALWAYS_INLINE constexpr size_t AllocationTraceRecorder::WrapIdxIfNeeded(
    size_t idx) {
  // Wrap the counter around, e.g. for a buffer size of 256 the counter wraps
  // when reaching 256. To enable the compiler to emit more optimized code we
  // assert that |kMaximumNumberOfMemoryOperationTraces| is a power of two.
  static_assert(
      std::has_single_bit(kMaximumNumberOfMemoryOperationTraces),
      "kMaximumNumberOfMemoryOperationTraces should be a power of 2 to "
      "allow for fast modulo operation.");

  return idx % kMaximumNumberOfMemoryOperationTraces;
}

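// For illustration, with the default capacity of 1 << 15 slots:
//   WrapIdxIfNeeded(32767) == 32767  // last slot of the first pass
//   WrapIdxIfNeeded(32768) == 0      // wraps around, reuses the oldest slot
// while |total_number_of_records_| keeps growing monotonically.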
ALWAYS_INLINE size_t AllocationTraceRecorder::GetNextIndex() {
  const auto raw_idx =
      total_number_of_records_.fetch_add(1, std::memory_order_relaxed);
  return WrapIdxIfNeeded(raw_idx);
}

}  // namespace base::debug::tracer

#endif  // BASE_DEBUG_ALLOCATION_TRACE_H_