// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/allocation_trace.h"

#include <array>
#include <atomic>

#include "base/check_op.h"

namespace base::debug::tracer {

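// Reports whether a record operation is currently in progress on this slot.
// test_and_set() returns the flag's previous value: if it was already set,
// another thread is in the middle of writing this record. Otherwise the call
// itself has just set the flag spuriously, so clear it again and report false.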
bool OperationRecord::IsRecording() const {
  if (is_recording_.test_and_set()) {
    return true;
  }

  is_recording_.clear();
  return false;
}

OperationType OperationRecord::GetOperationType() const {
  return operation_type_;
}

const void* OperationRecord::GetAddress() const {
  return address_;
}

size_t OperationRecord::GetSize() const {
  return size_;
}

const StackTraceContainer& OperationRecord::GetStackTrace() const {
  return stack_trace_;
}

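// The collision counter is only maintained when full reporting is compiled
// in, so the statistics struct provides a matching constructor for each
// configuration.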
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
AllocationTraceRecorderStatistics::AllocationTraceRecorderStatistics(
    size_t total_number_of_allocations,
    size_t total_number_of_collisions)
    : total_number_of_allocations(total_number_of_allocations),
      total_number_of_collisions(total_number_of_collisions) {}
#else
AllocationTraceRecorderStatistics::AllocationTraceRecorderStatistics(
    size_t total_number_of_allocations)
    : total_number_of_allocations(total_number_of_allocations) {}
#endif

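// OnAllocation() and OnFree() may run concurrently on any thread. Rather than
// lock, they claim the next slot optimistically and move on to a fresh slot
// whenever the current one is still being written by another thread (counted
// as a collision when full reporting is enabled).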
void AllocationTraceRecorder::OnAllocation(const void* allocated_address,
                                           size_t allocated_size) {
  // Record the allocation into the next available slot, allowing for failure
  // due to the slot already being in-use by another
  // OperationRecord::Initialize*() call from another thread.
  for (auto index = GetNextIndex();
       !alloc_trace_buffer_[index].InitializeAllocation(allocated_address,
                                                        allocated_size);
       index = GetNextIndex()) {
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
    total_number_of_collisions_.fetch_add(1, std::memory_order_relaxed);
#endif
  }
}

void AllocationTraceRecorder::OnFree(const void* freed_address) {
  // Record the free into the next available slot, allowing for failure due to
  // the slot already being in-use by another OperationRecord::Initialize*()
  // call from another thread.
  for (auto index = GetNextIndex();
       !alloc_trace_buffer_[index].InitializeFree(freed_address);
       index = GetNextIndex()) {
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
    total_number_of_collisions_.fetch_add(1, std::memory_order_relaxed);
#endif
  }
}

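// The trace buffer acts as a ring buffer: the number of valid entries is the
// total record count, capped at the buffer capacity once the recorder has
// wrapped around.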
size_t AllocationTraceRecorder::size() const {
  return std::min(kMaximumNumberOfMemoryOperationTraces,
                  total_number_of_records_.load(std::memory_order_relaxed));
}

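// Maps the logical index (0 == oldest retained record) to a physical buffer
// slot. Until the buffer wraps, the mapping is the identity; afterwards the
// oldest record sits just past the most recently written slot, so the index is
// offset by the total record count and wrapped back into the buffer
// (WrapIdxIfNeeded, defined elsewhere, is assumed to reduce the index modulo
// the buffer capacity).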
const OperationRecord& AllocationTraceRecorder::operator[](size_t idx) const {
  DCHECK_LT(idx, size());

  const size_t array_index =
      size() < GetMaximumNumberOfTraces()
          ? idx
          : WrapIdxIfNeeded(
                total_number_of_records_.load(std::memory_order_relaxed) + idx);

  DCHECK_LT(array_index, alloc_trace_buffer_.size());

  return alloc_trace_buffer_[array_index];
}

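// Returns a snapshot of the counters. The counters are read with relaxed
// ordering, so the snapshot may be slightly stale while recording is ongoing;
// the collision count is only available with full reporting enabled.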
AllocationTraceRecorderStatistics
AllocationTraceRecorder::GetRecorderStatistics() const {
#if BUILDFLAG(ENABLE_ALLOCATION_TRACE_RECORDER_FULL_REPORTING)
  return {total_number_of_records_.load(std::memory_order_relaxed),
          total_number_of_collisions_.load(std::memory_order_relaxed)};
#else
  return {total_number_of_records_.load(std::memory_order_relaxed)};
#endif
}

}  // namespace base::debug::tracer