// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/sample_vector.h"

#include "base/check_op.h"
#include "base/lazy_instance.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"

// This SampleVector makes use of the single-sample embedded in the base
// HistogramSamples class. If the count is non-zero then there is guaranteed
// (within the bounds of "eventual consistency") to be no allocated external
// storage. Once the full counts storage is allocated, the single-sample must
// be extracted and disabled.
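//
// A minimal usage sketch (illustrative only; the BucketRanges calls assume
// the API declared in base/metrics/bucket_ranges.h):
//
//   BucketRanges ranges(4);     // Three buckets: [1,5), [5,10), [10,20).
//   ranges.set_range(0, 1);
//   ranges.set_range(1, 5);
//   ranges.set_range(2, 10);
//   ranges.set_range(3, 20);
//   SampleVector samples(&ranges);
//   samples.Accumulate(7, 1);   // Held in the single-sample slot.
//   samples.Accumulate(12, 2);  // A second bucket forces counts storage.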

namespace base {

typedef HistogramBase::Count Count;
typedef HistogramBase::Sample Sample;

SampleVectorBase::SampleVectorBase(uint64_t id,
                                   Metadata* meta,
                                   const BucketRanges* bucket_ranges)
    : HistogramSamples(id, meta), bucket_ranges_(bucket_ranges) {
  CHECK_GE(bucket_ranges_->bucket_count(), 1u);
}

SampleVectorBase::~SampleVectorBase() = default;

void SampleVectorBase::Accumulate(Sample value, Count count) {
  const size_t bucket_index = GetBucketIndex(value);

  // Handle the single-sample case.
  if (!counts()) {
    // Try to accumulate the parameters into the single-count entry.
    if (AccumulateSingleSample(value, count, bucket_index)) {
      // A race condition could lead to a new single-sample being accumulated
      // above just after another thread executed the MountCountsStorage below.
      // Since it is mounted, it could be mounted elsewhere and have values
      // written to it. It's not allowed to have both a single-sample and
      // entries in the counts array so move the single-sample.
      if (counts())
        MoveSingleSampleToCounts();
      return;
    }

    // Need real storage to store both what was in the single-sample plus the
    // parameter information.
    MountCountsStorageAndMoveSingleSample();
  }

  // Handle the multi-sample case.
  Count new_value =
      subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count);
  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);

  // TODO(bcwhite) Remove after crbug.com/682680.
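  // A sign flip between the pre- and post-increment values of the bucket,
  // across a positive increment, indicates the bucket count overflowed.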
  Count old_value = new_value - count;
  if ((new_value >= 0) != (old_value >= 0) && count > 0)
    RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
}

Count SampleVectorBase::GetCount(Sample value) const {
  return GetCountAtIndex(GetBucketIndex(value));
}

Count SampleVectorBase::TotalCount() const {
  // Handle the single-sample case.
  SingleSample sample = single_sample().Load();
  if (sample.count != 0)
    return sample.count;

  // Handle the multi-sample case.
  if (counts() || MountExistingCountsStorage()) {
    Count count = 0;
    size_t size = counts_size();
    const HistogramBase::AtomicCount* counts_array = counts();
    for (size_t i = 0; i < size; ++i) {
      count += subtle::NoBarrier_Load(&counts_array[i]);
    }
    return count;
  }

  // And the no-value case.
  return 0;
}

Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const {
  DCHECK(bucket_index < counts_size());

  // Handle the single-sample case.
  SingleSample sample = single_sample().Load();
  if (sample.count != 0)
    return sample.bucket == bucket_index ? sample.count : 0;

  // Handle the multi-sample case.
  if (counts() || MountExistingCountsStorage())
    return subtle::NoBarrier_Load(&counts()[bucket_index]);

  // And the no-value case.
  return 0;
}

std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const {
  // Handle the single-sample case.
  SingleSample sample = single_sample().Load();
  if (sample.count != 0) {
    return std::make_unique<SingleSampleIterator>(
        bucket_ranges_->range(sample.bucket),
        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket);
  }

  // Handle the multi-sample case.
  if (counts() || MountExistingCountsStorage()) {
    return std::make_unique<SampleVectorIterator>(counts(), counts_size(),
                                                  bucket_ranges_);
  }

  // And the no-value case.
  return std::make_unique<SampleVectorIterator>(nullptr, 0, bucket_ranges_);
}

bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter,
                                       HistogramSamples::Operator op) {
  // Stop now if there's nothing to do.
  if (iter->Done())
    return true;

  // Get the first value and its index.
  HistogramBase::Sample min;
  int64_t max;
  HistogramBase::Count count;
  iter->Get(&min, &max, &count);
  size_t dest_index = GetBucketIndex(min);

  // The destination must be a superset of the source, meaning that though the
  // incoming ranges will find an exact match, the incoming bucket-index, if
  // it exists, may be offset from the destination bucket-index. Calculate
  // that offset of the passed iterator; there are no overflow checks
  // because 2's complement math will work it out in the end.
  //
  // Because GetBucketIndex() always returns the same true or false result for
  // a given iterator object, |index_offset| is either set here and used below,
  // or never set and never used. The compiler doesn't know this, though, which
  // is why it's necessary to initialize it to something.
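  //
  // For example, if the source iterator reports bucket index 2 for a sample
  // that lands in destination bucket 5, then |index_offset| is 3 and every
  // later source index i maps to destination index i + 3. Unsigned
  // wrap-around in the subtraction is harmless because the addition below
  // reverses it.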
  size_t index_offset = 0;
  size_t iter_index;
  if (iter->GetBucketIndex(&iter_index))
    index_offset = dest_index - iter_index;
  if (dest_index >= counts_size())
    return false;

  // Post-increment. Information about the current sample is not available
  // after this point.
  iter->Next();

  // Single-value storage is possible if there is no counts storage and the
  // retrieved entry is the only one in the iterator.
  if (!counts()) {
    if (iter->Done()) {
      // Don't call AccumulateSingleSample because that updates sum and count
      // which was already done by the caller of this method.
      if (single_sample().Accumulate(
              dest_index, op == HistogramSamples::ADD ? count : -count)) {
        // Handle race-condition that mounted counts storage between above and
        // here.
        if (counts())
          MoveSingleSampleToCounts();
        return true;
      }
    }

    // The counts storage will be needed to hold the multiple incoming values.
    MountCountsStorageAndMoveSingleSample();
  }

  // Go through the iterator and add the counts into the correct bucket.
  while (true) {
    // Ensure that the sample's min/max match the ranges min/max.
    if (min != bucket_ranges_->range(dest_index) ||
        max != bucket_ranges_->range(dest_index + 1)) {
      NOTREACHED() << "sample=" << min << "," << max
                   << "; range=" << bucket_ranges_->range(dest_index) << ","
                   << bucket_ranges_->range(dest_index + 1);
      return false;
    }

    // Sample's bucket matches exactly. Adjust count.
    subtle::NoBarrier_AtomicIncrement(
        &counts()[dest_index], op == HistogramSamples::ADD ? count : -count);

    // Advance to the next iterable sample. See comments above for how
    // everything works.
    if (iter->Done())
      return true;
    iter->Get(&min, &max, &count);
    if (iter->GetBucketIndex(&iter_index)) {
      // Destination bucket is a known offset from the source bucket.
      dest_index = iter_index + index_offset;
    } else {
      // Destination bucket has to be determined anew each time.
      dest_index = GetBucketIndex(min);
    }
    if (dest_index >= counts_size())
      return false;
    iter->Next();
  }
}

// Use simple binary search. This is very general, but there are better
// approaches if we knew that the buckets were linearly distributed.
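//
// For example, with ranges {0, 1, 10, 100, INT_MAX} (four buckets), a value
// of 50 narrows [under, over) from [0, 4) to [2, 4) to [2, 3) and returns
// bucket index 2, since range(2) == 10 <= 50 < 100 == range(3).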
size_t SampleVectorBase::GetBucketIndex(Sample value) const {
  size_t bucket_count = bucket_ranges_->bucket_count();
  CHECK_GE(bucket_count, 1u);
  CHECK_GE(value, bucket_ranges_->range(0));
  CHECK_LT(value, bucket_ranges_->range(bucket_count));

  size_t under = 0;
  size_t over = bucket_count;
  size_t mid;
  do {
    DCHECK_GE(over, under);
    mid = under + (over - under) / 2;
    if (mid == under)
      break;
    if (bucket_ranges_->range(mid) <= value)
      under = mid;
    else
      over = mid;
  } while (true);

  DCHECK_LE(bucket_ranges_->range(mid), value);
  CHECK_GT(bucket_ranges_->range(mid + 1), value);
  return mid;
}

void SampleVectorBase::MoveSingleSampleToCounts() {
  DCHECK(counts());

  // Disable the single-sample since there is now counts storage for the data.
  SingleSample sample = single_sample().Extract(/*disable=*/true);

  // Stop here if there is no "count" as trying to find the bucket index of
  // an invalid (including zero) "value" will crash.
  if (sample.count == 0)
    return;

  // Move the value into storage. Sum and redundant-count already account
  // for this entry so no need to call IncreaseSumAndCount().
  subtle::NoBarrier_AtomicIncrement(&counts()[sample.bucket], sample.count);
}

void SampleVectorBase::MountCountsStorageAndMoveSingleSample() {
  // There are many SampleVector objects and the lock is needed very
  // infrequently (just when advancing from single-sample to multi-sample) so
  // define a single, global lock that all can use. This lock only prevents
  // concurrent entry into the code below; access and updates to |counts_|
  // still require atomic operations.
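  //
  // The unlocked load below plus the re-check under the lock is the classic
  // double-checked locking pattern: the fast path skips the lock entirely
  // once |counts_| is set, and the second check ensures that only one thread
  // ever creates the storage.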
  static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER;
  if (subtle::NoBarrier_Load(&counts_) == 0) {
    AutoLock lock(counts_lock.Get());
    if (subtle::NoBarrier_Load(&counts_) == 0) {
      // Create the actual counts storage while the above lock is acquired.
      HistogramBase::Count* counts = CreateCountsStorageWhileLocked();
      DCHECK(counts);

      // Point |counts_| to the newly created storage. This is done while
      // locked to prevent possible concurrent calls to CreateCountsStorage
      // but, between that call and here, other threads could notice the
      // existence of the storage and race with this to set_counts(). That's
      // okay because (a) it's atomic and (b) it always writes the same value.
      set_counts(counts);
    }
  }

  // Move any single-sample into the newly mounted storage.
  MoveSingleSampleToCounts();
}

SampleVector::SampleVector(const BucketRanges* bucket_ranges)
    : SampleVector(0, bucket_ranges) {}

SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
    : SampleVectorBase(id, new LocalMetadata(), bucket_ranges) {}

SampleVector::~SampleVector() {
  delete static_cast<LocalMetadata*>(meta());
}

bool SampleVector::MountExistingCountsStorage() const {
  // There is never any existing storage other than what is already in use.
  return counts() != nullptr;
}

HistogramBase::AtomicCount* SampleVector::CreateCountsStorageWhileLocked() {
  local_counts_.resize(counts_size());
  return &local_counts_[0];
}

PersistentSampleVector::PersistentSampleVector(
    uint64_t id,
    const BucketRanges* bucket_ranges,
    Metadata* meta,
    const DelayedPersistentAllocation& counts)
    : SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) {
  // Only mount the full storage if the single-sample has been disabled.
  // Otherwise, it is possible for this object instance to start using (empty)
  // storage that was created incidentally while another instance continues to
  // update to the single sample. This "incidental creation" can happen because
  // the memory is a DelayedPersistentAllocation which allows multiple memory
  // blocks within it and applies an all-or-nothing approach to the allocation.
  // Thus, a request elsewhere for one of the _other_ blocks would make _this_
  // block available even though nothing has explicitly requested it.
  //
  // Note that it's not possible for the ctor to mount existing storage and
  // move any single-sample to it because sometimes the persistent memory is
  // read-only. Only non-const methods (which assume that memory is read/write)
  // can do that.
  if (single_sample().IsDisabled()) {
    bool success = MountExistingCountsStorage();
    DCHECK(success);
  }
}

PersistentSampleVector::~PersistentSampleVector() = default;

bool PersistentSampleVector::MountExistingCountsStorage() const {
  // There is no early exit if counts is not yet mounted because, given that
  // this is a virtual function, it's more efficient to do that at the call-
  // site. There is no danger, however, should this get called anyway (perhaps
  // because of a race condition) because at worst the |counts_| value would
  // be over-written (in an atomic manner) with the exact same address.

  if (!persistent_counts_.reference())
    return false;  // Nothing to mount.

  // Mount the counts array in position.
  set_counts(
      static_cast<HistogramBase::AtomicCount*>(persistent_counts_.Get()));

  // The above shouldn't fail but can if the data is corrupt or incomplete.
  return counts() != nullptr;
}

HistogramBase::AtomicCount*
PersistentSampleVector::CreateCountsStorageWhileLocked() {
  void* mem = persistent_counts_.Get();
  if (!mem) {
    // The above shouldn't fail but can if Bad Things(tm) are occurring in the
    // persistent allocator. Crashing isn't a good option so instead just
    // allocate something from the heap and return that. There will be no
    // sharing or persistence but worse things are already happening.
    return new HistogramBase::AtomicCount[counts_size()];
  }

  return static_cast<HistogramBase::AtomicCount*>(mem);
}

SampleVectorIterator::SampleVectorIterator(
    const std::vector<HistogramBase::AtomicCount>* counts,
    const BucketRanges* bucket_ranges)
    : counts_(&(*counts)[0]),
      counts_size_(counts->size()),
      bucket_ranges_(bucket_ranges),
      index_(0) {
  DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
  SkipEmptyBuckets();
}

SampleVectorIterator::SampleVectorIterator(
    const HistogramBase::AtomicCount* counts,
    size_t counts_size,
    const BucketRanges* bucket_ranges)
    : counts_(counts),
      counts_size_(counts_size),
      bucket_ranges_(bucket_ranges),
      index_(0) {
  DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
  SkipEmptyBuckets();
}

SampleVectorIterator::~SampleVectorIterator() = default;

bool SampleVectorIterator::Done() const {
  return index_ >= counts_size_;
}

void SampleVectorIterator::Next() {
  DCHECK(!Done());
  index_++;
  SkipEmptyBuckets();
}

void SampleVectorIterator::Get(HistogramBase::Sample* min,
                               int64_t* max,
                               HistogramBase::Count* count) const {
  DCHECK(!Done());
  if (min != nullptr)
    *min = bucket_ranges_->range(index_);
  if (max != nullptr)
    *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
  if (count != nullptr)
    *count = subtle::NoBarrier_Load(&counts_[index_]);
}

bool SampleVectorIterator::GetBucketIndex(size_t* index) const {
  DCHECK(!Done());
  if (index != nullptr)
    *index = index_;
  return true;
}

void SampleVectorIterator::SkipEmptyBuckets() {
  if (Done())
    return;

  while (index_ < counts_size_) {
    if (subtle::NoBarrier_Load(&counts_[index_]) != 0)
      return;
    index_++;
  }
}

}  // namespace base