// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/stats.h"

#include "base/format_macros.h"
#include "base/logging.h"
#include "base/string_util.h"
#include "net/disk_cache/backend_impl.h"

namespace {

const int32 kDiskSignature = 0xF01427E0;

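// On-disk format of the stats record. It has to fit in the two consecutive
// 256-byte blocks allocated by CreateStats() below.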
struct OnDiskStats {
  int32 signature;
  int size;
  int data_sizes[disk_cache::Stats::kDataSizesLength];
  int64 counters[disk_cache::Stats::MAX_COUNTER];
};

// Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
int LogBase2(int32 number) {
  unsigned int value = static_cast<unsigned int>(number);
  const unsigned int mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  const unsigned int s[] = {1, 2, 4, 8, 16};

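  // Binary search on the bit index: if any bit selected by mask[i] is set,
  // the logarithm is at least s[i], so shift those bits away and add s[i] to
  // the result. For example, LogBase2(20 * 1024) is 14.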
  unsigned int result = 0;
  for (int i = 4; i >= 0; i--) {
    if (value & mask[i]) {
      value >>= s[i];
      result |= s[i];
    }
  }
  return static_cast<int>(result);
}

static const char* kCounterNames[] = {
  "Open miss",
  "Open hit",
  "Create miss",
  "Create hit",
  "Resurrect hit",
  "Create error",
  "Trim entry",
  "Doom entry",
  "Doom cache",
  "Invalid entry",
  "Open entries",
  "Max entries",
  "Timer",
  "Read data",
  "Write data",
  "Open rankings",
  "Get rankings",
  "Fatal error",
  "Last report",
  "Last report timer"
};
COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
               update_the_names);

}  // namespace

namespace disk_cache {

bool LoadStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
  MappedFile* file = backend->File(address);
  if (!file)
    return false;

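  // The stats record lives inside a block file, so skip the file header and
  // seek to the record's first block.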
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(stats, sizeof(*stats), offset))
    return false;

  if (stats->signature != kDiskSignature)
    return false;

  // We don't want to discard the whole cache every time we have one extra
  // counter; just reset them to zero.
  if (stats->size != sizeof(*stats))
    memset(stats, 0, sizeof(*stats));

  return true;
}

bool StoreStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
  MappedFile* file = backend->File(address);
  if (!file)
    return false;

  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  return file->Write(stats, sizeof(*stats), offset);
}

bool CreateStats(BackendImpl* backend, Addr* address, OnDiskStats* stats) {
  if (!backend->CreateBlock(BLOCK_256, 2, address))
    return false;

  // If we have more than 512 bytes of counters, change kDiskSignature so we
  // don't overwrite something else (LoadStats must fail).
  COMPILE_ASSERT(sizeof(*stats) <= 256 * 2, use_more_blocks);
  memset(stats, 0, sizeof(*stats));
  stats->signature = kDiskSignature;
  stats->size = sizeof(*stats);

  return StoreStats(backend, *address, stats);
}

bool Stats::Init(BackendImpl* backend, uint32* storage_addr) {
  OnDiskStats stats;
  Addr address(*storage_addr);
  if (address.is_initialized()) {
    if (!LoadStats(backend, address, &stats))
      return false;
  } else {
    if (!CreateStats(backend, &address, &stats))
      return false;
    *storage_addr = address.value();
  }

  storage_addr_ = address.value();
  backend_ = backend;

  memcpy(data_sizes_, stats.data_sizes, sizeof(data_sizes_));
  memcpy(counters_, stats.counters, sizeof(counters_));

  // It seems impossible to support this histogram for more than one
  // simultaneous object with the current infrastructure.
  static bool first_time = true;
  if (first_time) {
    first_time = false;
    // ShouldReportAgain() will re-enter this object.
    if (!size_histogram_.get() && backend->cache_type() == net::DISK_CACHE &&
        backend->ShouldReportAgain()) {
      // Stats may be reused when the cache is re-created, but we want only one
      // histogram at any given time.
      size_histogram_ =
          StatsHistogram::StatsHistogramFactoryGet("DiskCache.SizeStats");
      size_histogram_->Init(this);
    }
  }

  return true;
}

Stats::~Stats() {
  Store();
}

// The array will be filled this way:
//  index      size
//    0       [0, 1024)
//    1    [1024, 2048)
//    2    [2048, 4096)
//    3      [4K, 6K)
//      ...
//   10     [18K, 20K)
//   11     [20K, 24K)
//   12     [24K, 28K)
//      ...
//   15     [36K, 40K)
//   16     [40K, 64K)
//   17     [64K, 128K)
//   18    [128K, 256K)
//      ...
//   23      [4M, 8M)
//   24      [8M, 16M)
//   25     [16M, 32M)
//   26     [32M, 64M)
//   27     [64M, ...)
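// For example, a 30000-byte entry (about 29.3 KB) lands in bucket 13
// ([28K, 32K)) and a 100 KB entry lands in bucket 17 ([64K, 128K)).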
int Stats::GetStatsBucket(int32 size) {
  if (size < 1024)
    return 0;

  // 10 slots more, until 20K.
  if (size < 20 * 1024)
    return size / 2048 + 1;

  // 5 slots more, from 20K to 40K.
  if (size < 40 * 1024)
    return (size - 20 * 1024) / 4096 + 11;

  // From this point on, use a logarithmic scale.
  int result = LogBase2(size) + 1;

  COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
  if (result >= kDataSizesLength)
    result = kDataSizesLength - 1;

  return result;
}

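// Returns the lower bound, in bytes, of the sizes that map to bucket |i|
// (see the table above GetStatsBucket()).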
int Stats::GetBucketRange(size_t i) const {
  if (i < 2)
    return static_cast<int>(1024 * i);

  if (i < 12)
    return static_cast<int>(2048 * (i - 1));

  if (i < 17)
    return static_cast<int>(4096 * (i - 11)) + 20 * 1024;

  int n = 64 * 1024;
  if (i > static_cast<size_t>(kDataSizesLength)) {
    NOTREACHED();
    i = kDataSizesLength;
  }

  i -= 17;
  n <<= i;
  return n;
}

void Stats::Snapshot(StatsHistogram::StatsSamples* samples) const {
  samples->GetCounts()->resize(kDataSizesLength);
  for (int i = 0; i < kDataSizesLength; i++) {
    int count = data_sizes_[i];
    if (count < 0)
      count = 0;
    samples->GetCounts()->at(i) = count;
  }
}

void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
  // We keep a counter of the data block sizes in an array where each entry
  // covers the (roughly logarithmic) size range described in the comment
  // before GetStatsBucket(). The first entry counts blocks of less than 1 KB
  // and the last one counts entries of 64 MB or more.
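  // For example, growing an entry from 3000 bytes to 50000 bytes decrements
  // data_sizes_[2] and increments data_sizes_[16].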
  int new_index = GetStatsBucket(new_size);
  int old_index = GetStatsBucket(old_size);

  if (new_size)
    data_sizes_[new_index]++;

  if (old_size)
    data_sizes_[old_index]--;
}

void Stats::OnEvent(Counters an_event) {
  DCHECK(an_event > MIN_COUNTER && an_event < MAX_COUNTER);
  counters_[an_event]++;
}

void Stats::SetCounter(Counters counter, int64 value) {
  DCHECK(counter > MIN_COUNTER && counter < MAX_COUNTER);
  counters_[counter] = value;
}

int64 Stats::GetCounter(Counters counter) const {
  DCHECK(counter > MIN_COUNTER && counter < MAX_COUNTER);
  return counters_[counter];
}

void Stats::GetItems(StatsItems* items) {
  std::pair<std::string, std::string> item;
  for (int i = 0; i < kDataSizesLength; i++) {
    item.first = StringPrintf("Size%02d", i);
    item.second = StringPrintf("0x%08x", data_sizes_[i]);
    items->push_back(item);
  }

  for (int i = MIN_COUNTER + 1; i < MAX_COUNTER; i++) {
    item.first = kCounterNames[i];
    item.second = StringPrintf("0x%" PRIx64, counters_[i]);
    items->push_back(item);
  }
}

int Stats::GetHitRatio() const {
  return GetRatio(OPEN_HIT, OPEN_MISS);
}

int Stats::GetResurrectRatio() const {
  return GetRatio(RESURRECT_HIT, CREATE_HIT);
}

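// Returns the integer percentage hit * 100 / (hit + miss), or 0 when there
// have been no hits at all.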
int Stats::GetRatio(Counters hit, Counters miss) const {
  int64 ratio = GetCounter(hit) * 100;
  if (!ratio)
    return 0;

  ratio /= (GetCounter(hit) + GetCounter(miss));
  return static_cast<int>(ratio);
}

void Stats::ResetRatios() {
  SetCounter(OPEN_HIT, 0);
  SetCounter(OPEN_MISS, 0);
  SetCounter(RESURRECT_HIT, 0);
  SetCounter(CREATE_HIT, 0);
}

int Stats::GetLargeEntriesSize() {
  int total = 0;
  // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
  // GetStatsBucket()).
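  // GetBucketRange() returns the lower bound of each bucket, so this total is
  // a conservative estimate of the space used by large entries.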
  for (int bucket = 20; bucket < kDataSizesLength; bucket++)
    total += data_sizes_[bucket] * GetBucketRange(bucket);

  return total;
}

void Stats::Store() {
  if (!backend_)
    return;

  OnDiskStats stats;
  stats.signature = kDiskSignature;
  stats.size = sizeof(stats);
  memcpy(stats.data_sizes, data_sizes_, sizeof(data_sizes_));
  memcpy(stats.counters, counters_, sizeof(counters_));

  Addr address(storage_addr_);
  StoreStats(backend_, address, &stats);
}

}  // namespace disk_cache