Avi Drissman | e4622aa | 2022-09-08 20:36:06 | [diff] [blame] | 1 | // Copyright 2012 The Chromium Authors |
[email protected] | 2f7d9cd | 2012-09-22 03:42:12 | [diff] [blame] | 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | #include "base/metrics/sample_vector.h" |
| 6 | |
Jean-Philippe Gravel | d90e05ca | 2022-11-27 20:29:47 | [diff] [blame] | 7 | #include <ostream> |
| 8 | |
Hans Wennborg | c3cffa6 | 2020-04-27 10:09:12 | [diff] [blame] | 9 | #include "base/check_op.h" |
Peter McNeeley | 2e1581a | 2023-12-14 22:57:15 | [diff] [blame] | 10 | #include "base/compiler_specific.h" |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 11 | #include "base/containers/heap_array.h" |
Luc Nguyen | 8084cab5 | 2023-04-14 16:20:56 | [diff] [blame] | 12 | #include "base/debug/crash_logging.h" |
Alexei Svitkine | 2d87fb5a | 2024-03-04 16:49:38 | [diff] [blame] | 13 | #include "base/debug/leak_annotations.h" |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 14 | #include "base/lazy_instance.h" |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 15 | #include "base/memory/ptr_util.h" |
Tom Sepez | 7a008da | 2023-11-07 21:02:33 | [diff] [blame] | 16 | #include "base/memory/raw_span.h" |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 17 | #include "base/metrics/persistent_memory_allocator.h" |
Hans Wennborg | c3cffa6 | 2020-04-27 10:09:12 | [diff] [blame] | 18 | #include "base/notreached.h" |
asvitkine | 3f17b146 | 2017-05-03 21:37:37 | [diff] [blame] | 19 | #include "base/numerics/safe_conversions.h" |
Greg Thompson | de48d06 | 2024-01-27 12:19:49 | [diff] [blame] | 20 | #include "base/strings/strcat.h" |
| 21 | #include "base/strings/string_number_conversions.h" |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 22 | #include "base/strings/stringprintf.h" |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 23 | #include "base/synchronization/lock.h" |
| 24 | #include "base/threading/platform_thread.h" |
| 25 | |
| 26 | // This SampleVector makes use of the single-sample embedded in the base |
| 27 | // HistogramSamples class. If the count is non-zero then there is guaranteed |
| 28 | // (within the bounds of "eventual consistency") to be no allocated external |
| 29 | // storage. Once the full counts storage is allocated, the single-sample must |
| 30 | // be extracted and disabled. |
[email protected] | 2f7d9cd | 2012-09-22 03:42:12 | [diff] [blame] | 31 | |
[email protected] | 2f7d9cd | 2012-09-22 03:42:12 | [diff] [blame] | 32 | namespace base { |
| 33 | |
| 34 | typedef HistogramBase::Count Count; |
| 35 | typedef HistogramBase::Sample Sample; |
| 36 | |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 37 | namespace { |
| 38 | |
| 39 | // An iterator for sample vectors. |
Luc Nguyen | 6458a5c | 2023-03-31 20:59:17 | [diff] [blame] | 40 | template <typename T> |
| 41 | class IteratorTemplate : public SampleCountIterator { |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 42 | public: |
Tom Sepez | 7a008da | 2023-11-07 21:02:33 | [diff] [blame] | 43 | IteratorTemplate(base::span<T> counts, const BucketRanges* bucket_ranges) |
| 44 | : counts_(counts), bucket_ranges_(bucket_ranges) { |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 45 | SkipEmptyBuckets(); |
| 46 | } |
| 47 | |
Luc Nguyen | 6458a5c | 2023-03-31 20:59:17 | [diff] [blame] | 48 | ~IteratorTemplate() override; |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 49 | |
| 50 | // SampleCountIterator: |
Tom Sepez | 7a008da | 2023-11-07 21:02:33 | [diff] [blame] | 51 | bool Done() const override { return index_ >= counts_.size(); } |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 52 | void Next() override { |
| 53 | DCHECK(!Done()); |
| 54 | index_++; |
| 55 | SkipEmptyBuckets(); |
| 56 | } |
| 57 | void Get(HistogramBase::Sample* min, |
| 58 | int64_t* max, |
Luc Nguyen | 6458a5c | 2023-03-31 20:59:17 | [diff] [blame] | 59 | HistogramBase::Count* count) override; |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 60 | |
| 61 | // SampleVector uses predefined buckets, so iterator can return bucket index. |
| 62 | bool GetBucketIndex(size_t* index) const override { |
| 63 | DCHECK(!Done()); |
| 64 | if (index != nullptr) { |
| 65 | *index = index_; |
| 66 | } |
| 67 | return true; |
| 68 | } |
| 69 | |
| 70 | private: |
| 71 | void SkipEmptyBuckets() { |
| 72 | if (Done()) { |
| 73 | return; |
| 74 | } |
| 75 | |
Tom Sepez | 7a008da | 2023-11-07 21:02:33 | [diff] [blame] | 76 | while (index_ < counts_.size()) { |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 77 | if (subtle::NoBarrier_Load(&counts_[index_]) != 0) { |
| 78 | return; |
| 79 | } |
| 80 | index_++; |
| 81 | } |
| 82 | } |
| 83 | |
Tom Sepez | 7a008da | 2023-11-07 21:02:33 | [diff] [blame] | 84 | raw_span<T> counts_; |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 85 | raw_ptr<const BucketRanges> bucket_ranges_; |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 86 | size_t index_ = 0; |
| 87 | }; |
| 88 | |
Tom Sepez | 7a008da | 2023-11-07 21:02:33 | [diff] [blame] | 89 | using SampleVectorIterator = IteratorTemplate<const HistogramBase::AtomicCount>; |
Luc Nguyen | 6458a5c | 2023-03-31 20:59:17 | [diff] [blame] | 90 | |
| 91 | template <> |
| 92 | SampleVectorIterator::~IteratorTemplate() = default; |
| 93 | |
| 94 | // Get() for an iterator of a SampleVector. |
| 95 | template <> |
| 96 | void SampleVectorIterator::Get(HistogramBase::Sample* min, |
| 97 | int64_t* max, |
| 98 | HistogramBase::Count* count) { |
| 99 | DCHECK(!Done()); |
| 100 | *min = bucket_ranges_->range(index_); |
| 101 | *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1)); |
| 102 | *count = subtle::NoBarrier_Load(&counts_[index_]); |
| 103 | } |
| 104 | |
Tom Sepez | 7a008da | 2023-11-07 21:02:33 | [diff] [blame] | 105 | using ExtractingSampleVectorIterator = |
| 106 | IteratorTemplate<HistogramBase::AtomicCount>; |
Luc Nguyen | 6458a5c | 2023-03-31 20:59:17 | [diff] [blame] | 107 | |
| 108 | template <> |
| 109 | ExtractingSampleVectorIterator::~IteratorTemplate() { |
| 110 | // Ensure that the user has consumed all the samples in order to ensure no |
| 111 | // samples are lost. |
| 112 | DCHECK(Done()); |
| 113 | } |
| 114 | |
| 115 | // Get() for an extracting iterator of a SampleVector. |
| 116 | template <> |
| 117 | void ExtractingSampleVectorIterator::Get(HistogramBase::Sample* min, |
| 118 | int64_t* max, |
| 119 | HistogramBase::Count* count) { |
| 120 | DCHECK(!Done()); |
| 121 | *min = bucket_ranges_->range(index_); |
| 122 | *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1)); |
| 123 | *count = subtle::NoBarrier_AtomicExchange(&counts_[index_], 0); |
| 124 | } |
| 125 | |
Luc Nguyen | 60b27cb | 2023-03-29 15:22:56 | [diff] [blame] | 126 | } // namespace |
| 127 | |
// Constructs a SampleVectorBase over externally-owned Metadata. The counts
// array size is fixed at construction from the bucket ranges; a histogram
// must have at least one bucket.
SampleVectorBase::SampleVectorBase(uint64_t id,
                                   Metadata* meta,
                                   const BucketRanges* bucket_ranges)
    : HistogramSamples(id, meta),
      bucket_ranges_(bucket_ranges),
      counts_size_(bucket_ranges_->bucket_count()) {
  CHECK_GE(counts_size_, 1u);
}
| 136 | |
// Constructs a SampleVectorBase that takes ownership of |meta|. Otherwise
// identical to the raw-pointer overload above.
SampleVectorBase::SampleVectorBase(uint64_t id,
                                   std::unique_ptr<Metadata> meta,
                                   const BucketRanges* bucket_ranges)
    : HistogramSamples(id, std::move(meta)),
      bucket_ranges_(bucket_ranges),
      counts_size_(bucket_ranges_->bucket_count()) {
  CHECK_GE(counts_size_, 1u);
}
| 145 | |
// Defaulted: no explicit cleanup is required here.
SampleVectorBase::~SampleVectorBase() = default;
[email protected] | 2f7d9cd | 2012-09-22 03:42:12 | [diff] [blame] | 147 | |
// Adds |count| samples of |value| to the vector. Uses the embedded
// single-sample while possible and promotes to the full counts array when it
// cannot hold the new data. Also updates the aggregate sum/count metadata.
void SampleVectorBase::Accumulate(Sample value, Count count) {
  const size_t bucket_index = GetBucketIndex(value);

  // Handle the single-sample case.
  if (!counts().has_value()) {
    // Try to accumulate the parameters into the single-count entry.
    if (AccumulateSingleSample(value, count, bucket_index)) {
      // A race condition could lead to a new single-sample being accumulated
      // above just after another thread executed the MountCountsStorage below.
      // Since it is mounted, it could be mounted elsewhere and have values
      // written to it. It's not allowed to have both a single-sample and
      // entries in the counts array so move the single-sample.
      if (counts().has_value()) {
        MoveSingleSampleToCounts();
      }
      return;
    }

    // Need real storage to store both what was in the single-sample plus the
    // parameter information.
    MountCountsStorageAndMoveSingleSample();
  }

  // Handle the multi-sample case.
  Count new_bucket_count =
      subtle::NoBarrier_AtomicIncrement(&counts_at(bucket_index), count);
  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);

  // TODO(bcwhite) Remove after crbug.com/682680.
  // A sign flip of the bucket count while adding a positive |count| indicates
  // signed overflow of the bucket; record it for diagnostics.
  Count old_bucket_count = new_bucket_count - count;
  bool record_negative_sample =
      (new_bucket_count >= 0) != (old_bucket_count >= 0) && count > 0;
  if (UNLIKELY(record_negative_sample)) {
    RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
  }
}
| 184 | |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 185 | Count SampleVectorBase::GetCount(Sample value) const { |
| 186 | return GetCountAtIndex(GetBucketIndex(value)); |
[email protected] | 2f7d9cd | 2012-09-22 03:42:12 | [diff] [blame] | 187 | } |
| 188 | |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 189 | Count SampleVectorBase::TotalCount() const { |
| 190 | // Handle the single-sample case. |
| 191 | SingleSample sample = single_sample().Load(); |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 192 | if (sample.count != 0) { |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 193 | return sample.count; |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 194 | } |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 195 | |
| 196 | // Handle the multi-sample case. |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 197 | if (counts().has_value() || MountExistingCountsStorage()) { |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 198 | Count count = 0; |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 199 | // TODO(danakj): In C++23 we can skip the `counts_span` lvalue and iterate |
| 200 | // over `counts().value()` directly without creating a dangling reference. |
| 201 | span<const HistogramBase::AtomicCount> counts_span = counts().value(); |
| 202 | for (const HistogramBase::AtomicCount& c : counts_span) { |
| 203 | count += subtle::NoBarrier_Load(&c); |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 204 | } |
| 205 | return count; |
| 206 | } |
| 207 | |
| 208 | // And the no-value case. |
| 209 | return 0; |
[email protected] | 2f7d9cd | 2012-09-22 03:42:12 | [diff] [blame] | 210 | } |
| 211 | |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 212 | Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const { |
| 213 | DCHECK(bucket_index < counts_size()); |
| 214 | |
| 215 | // Handle the single-sample case. |
| 216 | SingleSample sample = single_sample().Load(); |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 217 | if (sample.count != 0) { |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 218 | return sample.bucket == bucket_index ? sample.count : 0; |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 219 | } |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 220 | |
| 221 | // Handle the multi-sample case. |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 222 | if (counts().has_value() || MountExistingCountsStorage()) { |
| 223 | return subtle::NoBarrier_Load(&counts_at(bucket_index)); |
| 224 | } |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 225 | |
| 226 | // And the no-value case. |
| 227 | return 0; |
| 228 | } |
| 229 | |
// Returns a read-only iterator over the non-empty buckets. Handles three
// storage states: live single-sample, mounted counts array, and no storage.
std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const {
  // Handle the single-sample case.
  SingleSample sample = single_sample().Load();
  if (sample.count != 0) {
    // |bucket| is unsigned, so a single >= comparison covers the whole
    // out-of-range check.
    static_assert(std::is_unsigned<decltype(SingleSample::bucket)>::value);
    if (sample.bucket >= bucket_ranges_->bucket_count()) {
      // Return an empty iterator if the specified bucket is invalid (e.g. due
      // to corruption). If a different sample is eventually emitted, we will
      // move from SingleSample to a counts storage, and that time, we will
      // discard this invalid sample (see MoveSingleSampleToCounts()).
      return std::make_unique<SampleVectorIterator>(
          base::span<const HistogramBase::AtomicCount>(), bucket_ranges_);
    }

    return std::make_unique<SingleSampleIterator>(
        bucket_ranges_->range(sample.bucket),
        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket,
        /*value_was_extracted=*/false);
  }

  // Handle the multi-sample case.
  if (counts().has_value() || MountExistingCountsStorage()) {
    return std::make_unique<SampleVectorIterator>(*counts(), bucket_ranges_);
  }

  // And the no-value case.
  return std::make_unique<SampleVectorIterator>(
      base::span<const HistogramBase::AtomicCount>(), bucket_ranges_);
}
| 259 | |
// Returns an iterator that removes samples from the vector as they are read.
// The single-sample (if any) is extracted eagerly here; counts-array buckets
// are zeroed lazily by ExtractingSampleVectorIterator::Get().
std::unique_ptr<SampleCountIterator> SampleVectorBase::ExtractingIterator() {
  // Handle the single-sample case.
  SingleSample sample = single_sample().Extract();
  if (sample.count != 0) {
    // |bucket| is unsigned, so a single >= comparison covers the whole
    // out-of-range check.
    static_assert(std::is_unsigned<decltype(SingleSample::bucket)>::value);
    if (sample.bucket >= bucket_ranges_->bucket_count()) {
      // Return an empty iterator if the specified bucket is invalid (e.g. due
      // to corruption). Note that we've already removed the sample from the
      // underlying data, so this invalid sample is discarded.
      return std::make_unique<ExtractingSampleVectorIterator>(
          base::span<HistogramBase::AtomicCount>(), bucket_ranges_);
    }

    // Note that we have already extracted the samples (i.e., reset the
    // underlying data back to 0 samples), even before the iterator has been
    // used. This means that the caller needs to ensure that this value is
    // eventually consumed, otherwise the sample is lost. There is no iterator
    // that simply points to the underlying SingleSample and extracts its value
    // on-demand because there are tricky edge cases when the SingleSample is
    // disabled between the creation of the iterator and the actual call to
    // Get() (for example, due to histogram changing to use a vector to store
    // its samples).
    return std::make_unique<SingleSampleIterator>(
        bucket_ranges_->range(sample.bucket),
        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket,
        /*value_was_extracted=*/true);
  }

  // Handle the multi-sample case.
  if (counts().has_value() || MountExistingCountsStorage()) {
    return std::make_unique<ExtractingSampleVectorIterator>(*counts(),
                                                            bucket_ranges_);
  }

  // And the no-value case.
  return std::make_unique<ExtractingSampleVectorIterator>(
      base::span<HistogramBase::AtomicCount>(), bucket_ranges_);
}
| 298 | |
// Adds (op == ADD) or subtracts (op == SUBTRACT) every sample from |iter|
// into this vector. Returns false if an incoming bucket cannot be matched to
// a destination bucket; counts applied before the failure are kept.
bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter,
                                       HistogramSamples::Operator op) {
  // Stop now if there's nothing to do.
  if (iter->Done()) {
    return true;
  }

  // Get the first value and its index.
  HistogramBase::Sample min;
  int64_t max;
  HistogramBase::Count count;
  iter->Get(&min, &max, &count);
  size_t dest_index = GetBucketIndex(min);

  // The destination must be a superset of the source meaning that though the
  // incoming ranges will find an exact match, the incoming bucket-index, if
  // it exists, may be offset from the destination bucket-index. Calculate
  // that offset of the passed iterator; there are no overflow checks
  // because 2's complement math will work it out in the end.
  //
  // Because GetBucketIndex() always returns the same true or false result for
  // a given iterator object, |index_offset| is either set here and used below,
  // or never set and never used. The compiler doesn't know this, though, which
  // is why it's necessary to initialize it to something.
  size_t index_offset = 0;
  size_t iter_index;
  if (iter->GetBucketIndex(&iter_index)) {
    index_offset = dest_index - iter_index;
  }
  if (dest_index >= counts_size()) {
    return false;
  }

  // Post-increment. Information about the current sample is not available
  // after this point.
  iter->Next();

  // Single-value storage is possible if there is no counts storage and the
  // retrieved entry is the only one in the iterator.
  if (!counts().has_value()) {
    if (iter->Done()) {
      // Don't call AccumulateSingleSample because that updates sum and count
      // which was already done by the caller of this method.
      if (single_sample().Accumulate(
              dest_index, op == HistogramSamples::ADD ? count : -count)) {
        // Handle race-condition that mounted counts storage between above and
        // here.
        if (counts().has_value()) {
          MoveSingleSampleToCounts();
        }
        return true;
      }
    }

    // The counts storage will be needed to hold the multiple incoming values.
    MountCountsStorageAndMoveSingleSample();
  }

  // Go through the iterator and add the counts into correct bucket.
  while (true) {
    // Ensure that the sample's min/max match the ranges min/max.
    if (min != bucket_ranges_->range(dest_index) ||
        max != bucket_ranges_->range(dest_index + 1)) {
#if !BUILDFLAG(IS_NACL)
      // TODO(crbug/1432981): Remove these. They are used to investigate
      // unexpected failures.
      SCOPED_CRASH_KEY_NUMBER("SampleVector", "min", min);
      SCOPED_CRASH_KEY_NUMBER("SampleVector", "max", max);
      SCOPED_CRASH_KEY_NUMBER("SampleVector", "range_min",
                              bucket_ranges_->range(dest_index));
      SCOPED_CRASH_KEY_NUMBER("SampleVector", "range_max",
                              bucket_ranges_->range(dest_index + 1));
#endif  // !BUILDFLAG(IS_NACL)
      NOTREACHED() << "sample=" << min << "," << max
                   << "; range=" << bucket_ranges_->range(dest_index) << ","
                   << bucket_ranges_->range(dest_index + 1);
      return false;
    }

    // Sample's bucket matches exactly. Adjust count.
    subtle::NoBarrier_AtomicIncrement(
        &counts_at(dest_index), op == HistogramSamples::ADD ? count : -count);

    // Advance to the next iterable sample. See comments above for how
    // everything works.
    if (iter->Done()) {
      return true;
    }
    iter->Get(&min, &max, &count);
    if (iter->GetBucketIndex(&iter_index)) {
      // Destination bucket is a known offset from the source bucket.
      dest_index = iter_index + index_offset;
    } else {
      // Destination bucket has to be determined anew each time.
      dest_index = GetBucketIndex(min);
    }
    if (dest_index >= counts_size()) {
      return false;
    }
    iter->Next();
  }
}
| 401 | |
// Uses simple binary search or calculates the index directly if it's an "exact"
// linear histogram. This is very general, but there are better approaches if we
// knew that the buckets were linearly distributed.
size_t SampleVectorBase::GetBucketIndex(Sample value) const {
  size_t bucket_count = bucket_ranges_->bucket_count();
  // |value| must lie within the histogram's overall [min, max) range.
  CHECK_GE(value, bucket_ranges_->range(0));
  CHECK_LT(value, bucket_ranges_->range(bucket_count));

  // For "exact" linear histograms, e.g. bucket_count = maximum + 1, their
  // minimum is 1 and bucket sizes are 1. Thus, we don't need to binary search
  // the bucket index. The bucket index for bucket |value| is just the |value|.
  Sample maximum = bucket_ranges_->range(bucket_count - 1);
  if (maximum == static_cast<Sample>(bucket_count - 1)) {
    // |value| is in the underflow bucket.
    if (value < 1) {
      return 0;
    }
    // |value| is in the overflow bucket.
    if (value > maximum) {
      return bucket_count - 1;
    }
    return static_cast<size_t>(value);
  }

  // Binary search for the highest bucket whose lower bound is <= |value|.
  // Loop invariant: range(under) <= value < range(over).
  size_t under = 0;
  size_t over = bucket_count;
  size_t mid;
  do {
    DCHECK_GE(over, under);
    // Midpoint computed without risk of (under + over) overflow.
    mid = under + (over - under) / 2;
    if (mid == under) {
      break;
    }
    if (bucket_ranges_->range(mid) <= value) {
      under = mid;
    } else {
      over = mid;
    }
  } while (true);

  DCHECK_LE(bucket_ranges_->range(mid), value);
  CHECK_GT(bucket_ranges_->range(mid + 1), value);
  return mid;
}
| 446 | |
// Moves a live single-sample (if any) into the already-mounted counts
// storage, permanently disabling the single-sample in the process.
void SampleVectorBase::MoveSingleSampleToCounts() {
  DCHECK(counts().has_value());

  // Disable the single-sample since there is now counts storage for the data.
  SingleSample sample = single_sample().ExtractAndDisable();

  // Stop here if there is no "count" as trying to find the bucket index of
  // an invalid (including zero) "value" will crash.
  if (sample.count == 0) {
    return;
  }

  // Stop here if the sample bucket would be out of range for the AtomicCount
  // array; such a (presumably corrupt) sample is discarded.
  if (sample.bucket >= counts_size()) {
    return;
  }

  // Move the value into storage. Sum and redundant-count already account
  // for this entry so no need to call IncreaseSumAndCount().
  subtle::NoBarrier_AtomicIncrement(&counts_at(sample.bucket), sample.count);
}
| 469 | |
// Creates (or attaches) the counts storage if it doesn't exist yet, then
// migrates any single-sample into it. Safe to call from multiple threads.
void SampleVectorBase::MountCountsStorageAndMoveSingleSample() {
  // There are many SampleVector objects and the lock is needed very
  // infrequently (just when advancing from single-sample to multi-sample) so
  // define a single, global lock that all can use. This lock only prevents
  // concurrent entry into the code below; access and updates to |counts_data_|
  // still requires atomic operations.
  static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER;
  // Check, lock, then re-check so the lock is only taken (and storage only
  // created once) when storage does not already exist.
  if (counts_data_.load(std::memory_order_relaxed) == nullptr) {
    AutoLock lock(counts_lock.Get());
    if (counts_data_.load(std::memory_order_relaxed) == nullptr) {
      // Create the actual counts storage while the above lock is acquired.
      span<HistogramBase::Count> counts = CreateCountsStorageWhileLocked();
      // Point |counts()| to the newly created storage. This is done while
      // locked to prevent possible concurrent calls to CreateCountsStorage
      // but, between that call and here, other threads could notice the
      // existence of the storage and race with this to set_counts(). That's
      // okay because (a) it's atomic and (b) it always writes the same value.
      set_counts(counts);
    }
  }

  // Move any single-sample into the newly mounted storage.
  MoveSingleSampleToCounts();
}
| 494 | |
// Convenience constructor: delegates to the id-taking constructor with an id
// of 0 (presumably "no id assigned" -- confirm against SampleVectorBase).
SampleVector::SampleVector(const BucketRanges* bucket_ranges)
    : SampleVector(0, bucket_ranges) {}
| 497 | |
// Constructs a sample vector whose metadata lives on the local heap (a fresh
// LocalMetadata), as opposed to PersistentSampleVector which is handed
// externally-owned metadata.
SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
    : SampleVectorBase(id, std::make_unique<LocalMetadata>(), bucket_ranges) {}
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 500 | |
Arthur Sonzogni | bc8777c30 | 2022-05-24 09:23:06 | [diff] [blame] | 501 | SampleVector::~SampleVector() = default; |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 502 | |
Luc Nguyen | bbaab29 | 2023-09-28 23:30:57 | [diff] [blame] | 503 | bool SampleVector::IsDefinitelyEmpty() const { |
| 504 | // If we are still using SingleSample, and it has a count of 0, then |this| |
| 505 | // has no samples. If we are not using SingleSample, always return false, even |
| 506 | // though it is possible that |this| has no samples (e.g. we are using a |
| 507 | // counts array and all the bucket counts are 0). If we are wrong, this will |
| 508 | // just make the caller perform some extra work thinking that |this| is |
| 509 | // non-empty. |
| 510 | AtomicSingleSample sample = single_sample(); |
| 511 | return HistogramSamples::IsDefinitelyEmpty() && !sample.IsDisabled() && |
| 512 | sample.Load().count == 0; |
| 513 | } |
| 514 | |
bool SampleVector::MountExistingCountsStorage() const {
  // There is never any existing storage other than what is already in use.
  // For this local (heap-backed) vector, "mounted" simply means the counts
  // span has already been set.
  return counts().has_value();
}
| 519 | |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 520 | std::string SampleVector::GetAsciiHeader(StringPiece histogram_name, |
| 521 | int32_t flags) const { |
| 522 | Count sample_count = TotalCount(); |
| 523 | std::string output; |
Greg Thompson | de48d06 | 2024-01-27 12:19:49 | [diff] [blame] | 524 | StrAppend(&output, {"Histogram: ", histogram_name, " recorded ", |
| 525 | NumberToString(sample_count), " samples"}); |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 526 | if (sample_count == 0) { |
| 527 | DCHECK_EQ(sum(), 0); |
| 528 | } else { |
| 529 | double mean = static_cast<float>(sum()) / sample_count; |
| 530 | StringAppendF(&output, ", mean = %.1f", mean); |
| 531 | } |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 532 | if (flags) { |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 533 | StringAppendF(&output, " (flags = 0x%x)", flags); |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 534 | } |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 535 | return output; |
| 536 | } |
| 537 | |
| 538 | std::string SampleVector::GetAsciiBody() const { |
| 539 | Count sample_count = TotalCount(); |
| 540 | |
| 541 | // Prepare to normalize graphical rendering of bucket contents. |
| 542 | double max_size = 0; |
| 543 | double scaling_factor = 1; |
| 544 | max_size = GetPeakBucketSize(); |
| 545 | // Scale histogram bucket counts to take at most 72 characters. |
| 546 | // Note: Keep in sync w/ kLineLength histogram_samples.cc |
| 547 | const double kLineLength = 72; |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 548 | if (max_size > kLineLength) { |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 549 | scaling_factor = kLineLength / max_size; |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 550 | } |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 551 | |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 552 | // Calculate largest print width needed for any of our bucket range displays. |
| 553 | size_t print_width = 1; |
| 554 | for (uint32_t i = 0; i < bucket_count(); ++i) { |
| 555 | if (GetCountAtIndex(i)) { |
| 556 | size_t width = |
| 557 | GetSimpleAsciiBucketRange(bucket_ranges()->range(i)).size() + 1; |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 558 | if (width > print_width) { |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 559 | print_width = width; |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 560 | } |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 561 | } |
| 562 | } |
| 563 | |
| 564 | int64_t remaining = sample_count; |
| 565 | int64_t past = 0; |
| 566 | std::string output; |
| 567 | // Output the actual histogram graph. |
| 568 | for (uint32_t i = 0; i < bucket_count(); ++i) { |
| 569 | Count current = GetCountAtIndex(i); |
| 570 | remaining -= current; |
| 571 | std::string range = GetSimpleAsciiBucketRange(bucket_ranges()->range(i)); |
| 572 | output.append(range); |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 573 | for (size_t j = 0; range.size() + j < print_width + 1; ++j) { |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 574 | output.push_back(' '); |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 575 | } |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 576 | if (0 == current && i < bucket_count() - 1 && 0 == GetCountAtIndex(i + 1)) { |
| 577 | while (i < bucket_count() - 1 && 0 == GetCountAtIndex(i + 1)) { |
| 578 | ++i; |
| 579 | } |
| 580 | output.append("... \n"); |
| 581 | continue; // No reason to plot emptiness. |
| 582 | } |
| 583 | Count current_size = round(current * scaling_factor); |
| 584 | WriteAsciiBucketGraph(current_size, kLineLength, &output); |
| 585 | WriteAsciiBucketContext(past, current, remaining, i, &output); |
| 586 | output.append("\n"); |
| 587 | past += current; |
| 588 | } |
| 589 | DCHECK_EQ(sample_count, past); |
| 590 | return output; |
| 591 | } |
| 592 | |
| 593 | double SampleVector::GetPeakBucketSize() const { |
| 594 | Count max = 0; |
| 595 | for (uint32_t i = 0; i < bucket_count(); ++i) { |
| 596 | Count current = GetCountAtIndex(i); |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 597 | if (current > max) { |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 598 | max = current; |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 599 | } |
Quang Minh Tuan Nguyen | 39b1fed | 2021-03-16 00:49:38 | [diff] [blame] | 600 | } |
| 601 | return max; |
| 602 | } |
| 603 | |
| 604 | void SampleVector::WriteAsciiBucketContext(int64_t past, |
| 605 | Count current, |
| 606 | int64_t remaining, |
| 607 | uint32_t current_bucket_index, |
| 608 | std::string* output) const { |
| 609 | double scaled_sum = (past + current + remaining) / 100.0; |
| 610 | WriteAsciiBucketValue(current, scaled_sum, output); |
| 611 | if (0 < current_bucket_index) { |
| 612 | double percentage = past / scaled_sum; |
| 613 | StringAppendF(output, " {%3.1f%%}", percentage); |
| 614 | } |
| 615 | } |
| 616 | |
span<HistogramBase::AtomicCount>
SampleVector::CreateCountsStorageWhileLocked() {
  // Allocates the counts array on the heap, one slot per bucket. Per the
  // "WhileLocked" contract, this runs under the global counts lock (see
  // MountCountsStorageAndMoveSingleSample), so the resize cannot race.
  local_counts_.resize(counts_size());
  return local_counts_;
}
| 622 | |
| 623 | PersistentSampleVector::PersistentSampleVector( |
| 624 | uint64_t id, |
| 625 | const BucketRanges* bucket_ranges, |
| 626 | Metadata* meta, |
| 627 | const DelayedPersistentAllocation& counts) |
| 628 | : SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) { |
| 629 | // Only mount the full storage if the single-sample has been disabled. |
| 630 | // Otherwise, it is possible for this object instance to start using (empty) |
| 631 | // storage that was created incidentally while another instance continues to |
| 632 | // update to the single sample. This "incidental creation" can happen because |
| 633 | // the memory is a DelayedPersistentAllocation which allows multiple memory |
| 634 | // blocks within it and applies an all-or-nothing approach to the allocation. |
| 635 | // Thus, a request elsewhere for one of the _other_ blocks would make _this_ |
| 636 | // block available even though nothing has explicitly requested it. |
| 637 | // |
| 638 | // Note that it's not possible for the ctor to mount existing storage and |
| 639 | // move any single-sample to it because sometimes the persistent memory is |
| 640 | // read-only. Only non-const methods (which assume that memory is read/write) |
| 641 | // can do that. |
| 642 | if (single_sample().IsDisabled()) { |
| 643 | bool success = MountExistingCountsStorage(); |
| 644 | DCHECK(success); |
| 645 | } |
| 646 | } |
| 647 | |
Chris Watkins | bb7211c | 2017-11-29 07:16:38 | [diff] [blame] | 648 | PersistentSampleVector::~PersistentSampleVector() = default; |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 649 | |
bool PersistentSampleVector::IsDefinitelyEmpty() const {
  // Not implemented. Callers are not expected to reach this override.
  NOTREACHED();

  // Always return false. If we are wrong, this will just make the caller
  // perform some extra work thinking that |this| is non-empty.
  return false;
}
| 658 | |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 659 | bool PersistentSampleVector::MountExistingCountsStorage() const { |
| 660 | // There is no early exit if counts is not yet mounted because, given that |
| 661 | // this is a virtual function, it's more efficient to do that at the call- |
| 662 | // site. There is no danger, however, should this get called anyway (perhaps |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 663 | // because of a race condition) because at worst the `counts_data_` and |
| 664 | // `counts_size_` members would be over-written (in an atomic manner) |
| 665 | // with the exact same values. |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 666 | |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 667 | if (!persistent_counts_.reference()) { |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 668 | return false; // Nothing to mount. |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 669 | } |
| 670 | |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 671 | // Mount the counts array in position. This shouldn't fail but can if the |
| 672 | // data is corrupt or incomplete. |
| 673 | span<HistogramBase::AtomicCount> mem = |
| 674 | persistent_counts_.Get<HistogramBase::AtomicCount>(); |
| 675 | if (mem.empty()) { |
| 676 | return false; |
| 677 | } |
| 678 | // Uses a span that only covers the counts the SampleVector should have |
| 679 | // access to, which can be a subset of the entire persistent allocation. |
| 680 | set_counts(mem.first(counts_size())); |
| 681 | return true; |
| 682 | } |
| 683 | |
| 684 | span<HistogramBase::AtomicCount> |
| 685 | PersistentSampleVector::CreateCountsStorageWhileLocked() { |
| 686 | span<HistogramBase::AtomicCount> mem = |
| 687 | persistent_counts_.Get<HistogramBase::AtomicCount>(); |
| 688 | if (mem.empty()) { |
| 689 | // The above shouldn't fail but can if Bad Things(tm) are occurring in |
| 690 | // the persistent allocator. Crashing isn't a good option so instead |
| 691 | // just allocate something from the heap that we will leak and return that. |
| 692 | // There will be no sharing or persistence but worse things are already |
| 693 | // happening. |
| 694 | auto array = HeapArray<HistogramBase::AtomicCount>::WithSize(counts_size()); |
Alexei Svitkine | 2d87fb5a | 2024-03-04 16:49:38 | [diff] [blame] | 695 | ANNOTATE_LEAKING_OBJECT_PTR(array.data()); |
danakj | ad724a8 | 2024-01-25 17:37:40 | [diff] [blame] | 696 | return std::move(array).leak(); |
| 697 | } |
| 698 | |
| 699 | // Returns a span that only covers the counts the SampleVector should have |
| 700 | // access to, which can be a subset of the entire persistent allocation. |
| 701 | return mem.first(counts_size()); |
bcwhite | fa8485b | 2017-05-01 16:43:25 | [diff] [blame] | 702 | } |
| 703 | |
[email protected] | 2f7d9cd | 2012-09-22 03:42:12 | [diff] [blame] | 704 | } // namespace base |