1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/disk_cache/blockfile/backend_impl_v3.h"
6
7 #include "base/bind.h"
8 #include "base/bind_helpers.h"
9 #include "base/file_util.h"
10 #include "base/files/file_path.h"
11 #include "base/hash.h"
12 #include "base/message_loop/message_loop.h"
13 #include "base/metrics/field_trial.h"
14 #include "base/metrics/histogram.h"
15 #include "base/metrics/stats_counters.h"
16 #include "base/rand_util.h"
17 #include "base/strings/string_util.h"
18 #include "base/strings/stringprintf.h"
19 #include "base/sys_info.h"
20 #include "base/threading/thread_restrictions.h"
21 #include "base/time/time.h"
22 #include "base/timer/timer.h"
23 #include "net/base/net_errors.h"
24 #include "net/disk_cache/blockfile/disk_format_v3.h"
25 #include "net/disk_cache/blockfile/entry_impl_v3.h"
26 #include "net/disk_cache/blockfile/errors.h"
27 #include "net/disk_cache/blockfile/experiments.h"
28 #include "net/disk_cache/blockfile/file.h"
29 #include "net/disk_cache/blockfile/histogram_macros_v3.h"
30 #include "net/disk_cache/blockfile/index_table_v3.h"
31 #include "net/disk_cache/blockfile/storage_block-inl.h"
32 #include "net/disk_cache/cache_util.h"
33
34 // Provide a BackendImplV3 object to macros from histogram_macros_v3.h.
35 #define CACHE_UMA_BACKEND_IMPL_OBJ this
36
37 using base::Time;
38 using base::TimeDelta;
39 using base::TimeTicks;
40
41 namespace {
42
43 #if defined(V3_NOT_JUST_YET_READY)
44 const int kDefaultCacheSize = 80 * 1024 * 1024;
45
46 // Avoid trimming the cache for the first 5 minutes (10 timer ticks).
47 const int kTrimDelay = 10;
48 #endif // defined(V3_NOT_JUST_YET_READY).
49
50 } // namespace
51
52 // ------------------------------------------------------------------------
53
54 namespace disk_cache {
55
56 BackendImplV3::BackendImplV3(const base::FilePath& path,
57 base::MessageLoopProxy* cache_thread,
58 net::NetLog* net_log)
59 : index_(NULL),
60 path_(path),
61 block_files_(),
62 max_size_(0),
63 up_ticks_(0),
64 cache_type_(net::DISK_CACHE),
65 uma_report_(0),
66 user_flags_(0),
67 init_(false),
68 restarted_(false),
69 read_only_(false),
70 disabled_(false),
71 lru_eviction_(true),
72 first_timer_(true),
73 user_load_(false),
74 net_log_(net_log),
75 ptr_factory_(this) {
76 }
77
78 BackendImplV3::~BackendImplV3() {
79 CleanupCache();
80 }
81
82 int BackendImplV3::Init(const CompletionCallback& callback) {
83 DCHECK(!init_);
84 if (init_)
85 return net::ERR_FAILED;
86
87 return net::ERR_IO_PENDING;
88 }
89
90 // ------------------------------------------------------------------------
91
92 #if defined(V3_NOT_JUST_YET_READY)
93 int BackendImplV3::OpenPrevEntry(void** iter, Entry** prev_entry,
94 const CompletionCallback& callback) {
95 DCHECK(!callback.is_null());
96 return OpenFollowingEntry(true, iter, prev_entry, callback);
97 }
98 #endif // defined(V3_NOT_JUST_YET_READY).
99
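// Sets the maximum cache size in bytes. A value of zero keeps the default,
// negative values are rejected, and very large values are clamped to stay
// safely below kint32max.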
100 bool BackendImplV3::SetMaxSize(int max_bytes) {
101 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
102 if (max_bytes < 0)
103 return false;
104
105 // Zero size means use the default.
106 if (!max_bytes)
107 return true;
108
109 // Avoid a DCHECK later on.
110 if (max_bytes >= kint32max - kint32max / 10)
111 max_bytes = kint32max - kint32max / 10 - 1;
112
113 user_flags_ |= MAX_SIZE;
114 max_size_ = max_bytes;
115 return true;
116 }
117
118 void BackendImplV3::SetType(net::CacheType type) {
119 DCHECK_NE(net::MEMORY_CACHE, type);
120 cache_type_ = type;
121 }
122
123 bool BackendImplV3::CreateBlock(FileType block_type, int block_count,
124 Addr* block_address) {
125 return block_files_.CreateBlock(block_type, block_count, block_address);
126 }
127
128 #if defined(V3_NOT_JUST_YET_READY)
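// Moves |entry| within the eviction lists. Skipped for read-only caches and
// for unmodified entries of a shader cache.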
129 void BackendImplV3::UpdateRank(EntryImplV3* entry, bool modified) {
130 if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
131 return;
132 eviction_.UpdateRank(entry, modified);
133 }
134
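// Removes |entry| from its hash chain and from the eviction lists, pointing
// the parent entry (or the hash-table slot) past it.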
135 void BackendImplV3::InternalDoomEntry(EntryImplV3* entry) {
136 uint32 hash = entry->GetHash();
137 std::string key = entry->GetKey();
138 Addr entry_addr = entry->entry()->address();
139 bool error;
140 EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
141 CacheAddr child(entry->GetNextAddress());
142
143 Trace("Doom entry 0x%p", entry);
144
145 if (!entry->doomed()) {
146 // We may have doomed this entry from within MatchEntry.
147 eviction_.OnDoomEntry(entry);
148 entry->InternalDoom();
149 if (!new_eviction_) {
150 DecreaseNumEntries();
151 }
152 stats_.OnEvent(Stats::DOOM_ENTRY);
153 }
154
155 if (parent_entry) {
156 parent_entry->SetNextAddress(Addr(child));
157 parent_entry->Release();
158 } else if (!error) {
159 data_->table[hash & mask_] = child;
160 }
161
162 FlushIndex();
163 }
164
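// Called as an entry starts tearing down; drops it from the map of open
// entries.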
165 void BackendImplV3::OnEntryDestroyBegin(Addr address) {
166 EntriesMap::iterator it = open_entries_.find(address.value());
167 if (it != open_entries_.end())
168 open_entries_.erase(it);
169 }
170
171 void BackendImplV3::OnEntryDestroyEnd() {
172 DecreaseNumRefs();
173 if (data_->header.num_bytes > max_size_ && !read_only_ &&
174 (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
175 eviction_.TrimCache(false);
176 }
177
178 EntryImplV3* BackendImplV3::GetOpenEntry(Addr address) const {
179 DCHECK(address.is_initialized());
180 EntriesMap::const_iterator it =
181 open_entries_.find(address.value());
182 if (it != open_entries_.end()) {
183 // We have this entry in memory.
184 return it->second;
185 }
186
187 return NULL;
188 }
189
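// A single cache entry may use at most 1/8 of the total cache size.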
190 int BackendImplV3::MaxFileSize() const {
191 return max_size_ / 8;
192 }
193
194 void BackendImplV3::ModifyStorageSize(int32 old_size, int32 new_size) {
195 if (disabled_ || old_size == new_size)
196 return;
197 if (old_size > new_size)
198 SubstractStorageSize(old_size - new_size);
199 else
200 AddStorageSize(new_size - old_size);
201
202 // Update the usage statistics.
203 stats_.ModifyStorageStats(old_size, new_size);
204 }
205
206 void BackendImplV3::TooMuchStorageRequested(int32 size) {
207 stats_.ModifyStorageStats(0, size);
208 }
209
210 bool BackendImplV3::IsAllocAllowed(int current_size, int new_size) {
211 DCHECK_GT(new_size, current_size);
212 if (user_flags_ & NO_BUFFERING)
213 return false;
214
215 int to_add = new_size - current_size;
216 if (buffer_bytes_ + to_add > MaxBuffersSize())
217 return false;
218
219 buffer_bytes_ += to_add;
220 CACHE_UMA(COUNTS_50000, "BufferBytes", buffer_bytes_ / 1024);
221 return true;
222 }
223 #endif // defined(V3_NOT_JUST_YET_READY).
224
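// Returns |size| bytes to the shared budget of in-memory user buffers.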
225 void BackendImplV3::BufferDeleted(int size) {
226 DCHECK_GE(size, 0);
227 buffer_bytes_ -= size;
228 DCHECK_GE(buffer_bytes_, 0);
229 }
230
231 bool BackendImplV3::IsLoaded() const {
232 if (user_flags_ & NO_LOAD_PROTECTION)
233 return false;
234
235 return user_load_;
236 }
237
238 std::string BackendImplV3::HistogramName(const char* name) const {
239 static const char* names[] = { "Http", "", "Media", "AppCache", "Shader" };
240 DCHECK_NE(cache_type_, net::MEMORY_CACHE);
241 return base::StringPrintf("DiskCache3.%s_%s", name, names[cache_type_]);
242 }
243
244 base::WeakPtr<BackendImplV3> BackendImplV3::GetWeakPtr() {
245 return ptr_factory_.GetWeakPtr();
246 }
247
248 #if defined(V3_NOT_JUST_YET_READY)
249 // We want to remove biases from some histograms so we only send data once per
250 // week.
251 bool BackendImplV3::ShouldReportAgain() {
252 if (uma_report_)
253 return uma_report_ == 2;
254
255 uma_report_++;
256 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
257 Time last_time = Time::FromInternalValue(last_report);
258 if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
259 stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
260 uma_report_++;
261 return true;
262 }
263 return false;
264 }
265
266 void BackendImplV3::FirstEviction() {
267 IndexHeaderV3* header = index_.header();
268 header->flags |= CACHE_EVICTED;
269 DCHECK(header->create_time);
270 if (!GetEntryCount())
271 return; // This is just for unit tests.
272
273 Time create_time = Time::FromInternalValue(header->create_time);
274 CACHE_UMA(AGE, "FillupAge", create_time);
275
276 int64 use_time = stats_.GetCounter(Stats::TIMER);
277 CACHE_UMA(HOURS, "FillupTime", static_cast<int>(use_time / 120));
278 CACHE_UMA(PERCENTAGE, "FirstHitRatio", stats_.GetHitRatio());
279
280 if (!use_time)
281 use_time = 1;
282 CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate",
283 static_cast<int>(header->num_entries / use_time));
284 CACHE_UMA(COUNTS, "FirstByteIORate",
285 static_cast<int>((header->num_bytes / 1024) / use_time));
286
287 int avg_size = header->num_bytes / GetEntryCount();
288 CACHE_UMA(COUNTS, "FirstEntrySize", avg_size);
289
290 int large_entries_bytes = stats_.GetLargeEntriesSize();
291 int large_ratio = large_entries_bytes * 100 / header->num_bytes;
292 CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", large_ratio);
293
294 if (!lru_eviction_) {
295 CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", stats_.GetResurrectRatio());
296 CACHE_UMA(PERCENTAGE, "FirstNoUseRatio",
297 header->num_no_use_entries * 100 / header->num_entries);
298 CACHE_UMA(PERCENTAGE, "FirstLowUseRatio",
299 header->num_low_use_entries * 100 / header->num_entries);
300 CACHE_UMA(PERCENTAGE, "FirstHighUseRatio",
301 header->num_high_use_entries * 100 / header->num_entries);
302 }
303
304 stats_.ResetRatios();
305 }
306
307 void BackendImplV3::OnEvent(Stats::Counters an_event) {
308 stats_.OnEvent(an_event);
309 }
310
311 void BackendImplV3::OnRead(int32 bytes) {
312 DCHECK_GE(bytes, 0);
313 byte_count_ += bytes;
314 if (byte_count_ < 0)
315 byte_count_ = kint32max;
316 }
317
318 void BackendImplV3::OnWrite(int32 bytes) {
319 // We use the same implementation as OnRead... just log the number of bytes.
320 OnRead(bytes);
321 }
322
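// Periodic stats tick: keeps a sampled average of the number of open entries,
// records the entry/byte IO rates for the interval, and stores stats to disk
// every ten ticks.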
323 void BackendImplV3::OnTimerTick() {
324 stats_.OnEvent(Stats::TIMER);
325 int64 time = stats_.GetCounter(Stats::TIMER);
326 int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
327
328 // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
329 // the bias towards 0.
330 if (num_refs_ && (current != num_refs_)) {
331 int64 diff = (num_refs_ - current) / 50;
332 if (!diff)
333 diff = num_refs_ > current ? 1 : -1;
334 current = current + diff;
335 stats_.SetCounter(Stats::OPEN_ENTRIES, current);
336 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
337 }
338
339 CACHE_UMA(COUNTS, "NumberOfReferences", num_refs_);
340
341 CACHE_UMA(COUNTS_10000, "EntryAccessRate", entry_count_);
342 CACHE_UMA(COUNTS, "ByteIORate", byte_count_ / 1024);
343
344 // These values cover about 99.5% of the population (Oct 2011).
345 user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
346 entry_count_ = 0;
347 byte_count_ = 0;
348 up_ticks_++;
349
350 if (!data_)
351 first_timer_ = false;
352 if (first_timer_) {
353 first_timer_ = false;
354 if (ShouldReportAgain())
355 ReportStats();
356 }
357
358 // Save stats to disk at 5 min intervals.
359 if (time % 10 == 0)
360 StoreStats();
361 }
362
363 void BackendImplV3::SetUnitTestMode() {
364 user_flags_ |= UNIT_TEST_MODE;
365 }
366
367 void BackendImplV3::SetUpgradeMode() {
368 user_flags_ |= UPGRADE_MODE;
369 read_only_ = true;
370 }
371
372 void BackendImplV3::SetNewEviction() {
373 user_flags_ |= EVICTION_V2;
374 lru_eviction_ = false;
375 }
376
377 void BackendImplV3::SetFlags(uint32 flags) {
378 user_flags_ |= flags;
379 }
380
381 int BackendImplV3::FlushQueueForTest(const CompletionCallback& callback) {
382 background_queue_.FlushQueue(callback);
383 return net::ERR_IO_PENDING;
384 }
385
386 void BackendImplV3::TrimForTest(bool empty) {
387 eviction_.SetTestMode();
388 eviction_.TrimCache(empty);
389 }
390
391 void BackendImplV3::TrimDeletedListForTest(bool empty) {
392 eviction_.SetTestMode();
393 eviction_.TrimDeletedList(empty);
394 }
395
396 int BackendImplV3::SelfCheck() {
397 if (!init_) {
398 LOG(ERROR) << "Init failed";
399 return ERR_INIT_FAILED;
400 }
401
402 int num_entries = rankings_.SelfCheck();
403 if (num_entries < 0) {
404 LOG(ERROR) << "Invalid rankings list, error " << num_entries;
405 #if !defined(NET_BUILD_STRESS_CACHE)
406 return num_entries;
407 #endif
408 }
409
410 if (num_entries != data_->header.num_entries) {
411 LOG(ERROR) << "Number of entries mismatch";
412 #if !defined(NET_BUILD_STRESS_CACHE)
413 return ERR_NUM_ENTRIES_MISMATCH;
414 #endif
415 }
416
417 return CheckAllEntries();
418 }
419
420 // ------------------------------------------------------------------------
421
422 net::CacheType BackendImplV3::GetCacheType() const {
423 return cache_type_;
424 }
425
426 int32 BackendImplV3::GetEntryCount() const {
427 if (disabled_)
428 return 0;
429 DCHECK(init_);
430 return index_.header()->num_entries;
431 }
432
433 int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
434 const CompletionCallback& callback) {
435 if (disabled_)
436 return NULL;
437
438 TimeTicks start = TimeTicks::Now();
439 uint32 hash = base::Hash(key);
440 Trace("Open hash 0x%x", hash);
441
442 bool error;
443 EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
444 if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
445 // The entry was already evicted.
446 cache_entry->Release();
447 cache_entry = NULL;
448 }
449
450 int current_size = data_->header.num_bytes / (1024 * 1024);
451 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
452 int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
453 int64 use_hours = total_hours - no_use_hours;
454
455 if (!cache_entry) {
456 CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
457 CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
458 CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
459 CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
460 stats_.OnEvent(Stats::OPEN_MISS);
461 return NULL;
462 }
463
464 eviction_.OnOpenEntry(cache_entry);
465 entry_count_++;
466
467 Trace("Open hash 0x%x end: 0x%x", hash,
468 cache_entry->entry()->address().value());
469 CACHE_UMA(AGE_MS, "OpenTime", 0, start);
470 CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
471 CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
472 CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
473 stats_.OnEvent(Stats::OPEN_HIT);
474 SIMPLE_STATS_COUNTER("disk_cache.hit");
475 return cache_entry;
476 }
477
478 int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
479 const CompletionCallback& callback) {
480 if (disabled_ || key.empty())
481 return NULL;
482
483 TimeTicks start = TimeTicks::Now();
484 Trace("Create hash 0x%x", hash);
485
486 scoped_refptr<EntryImpl> parent;
487 Addr entry_address(data_->table[hash & mask_]);
488 if (entry_address.is_initialized()) {
489 // We have an entry already. It could be the one we are looking for, or just
490 // a hash conflict.
491 bool error;
492 EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
493 if (old_entry)
494 return ResurrectEntry(old_entry);
495
496 EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
497 DCHECK(!error);
498 if (parent_entry) {
499 parent.swap(&parent_entry);
500 } else if (data_->table[hash & mask_]) {
501 // We should have corrected the problem.
502 NOTREACHED();
503 return NULL;
504 }
505 }
506
507 // The general flow is to allocate disk space and initialize the entry data,
508 // followed by saving that to disk, then linking the entry through the index
509 // and finally through the lists. If there is a crash in this process, we may
510 // end up with:
511 // a. Used, unreferenced empty blocks on disk (basically just garbage).
512 // b. Used, unreferenced but meaningful data on disk (more garbage).
513 // c. A fully formed entry, reachable only through the index.
514 // d. A fully formed entry, also reachable through the lists, but still dirty.
515 //
516 // Anything after (b) can be automatically cleaned up. We may consider saving
517 // the current operation (as we do while manipulating the lists) so that we
518 // can detect and cleanup (a) and (b).
519
520 int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
521 if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
522 LOG(ERROR) << "Create entry failed " << key.c_str();
523 stats_.OnEvent(Stats::CREATE_ERROR);
524 return NULL;
525 }
526
527 Addr node_address(0);
528 if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
529 block_files_.DeleteBlock(entry_address, false);
530 LOG(ERROR) << "Create entry failed " << key.c_str();
531 stats_.OnEvent(Stats::CREATE_ERROR);
532 return NULL;
533 }
534
535 scoped_refptr<EntryImpl> cache_entry(
536 new EntryImpl(this, entry_address, false));
537 IncreaseNumRefs();
538
539 if (!cache_entry->CreateEntry(node_address, key, hash)) {
540 block_files_.DeleteBlock(entry_address, false);
541 block_files_.DeleteBlock(node_address, false);
542 LOG(ERROR) << "Create entry failed " << key.c_str();
543 stats_.OnEvent(Stats::CREATE_ERROR);
544 return NULL;
545 }
546
547 cache_entry->BeginLogging(net_log_, true);
548
549 // We are not failing the operation; let's add this to the map.
550 open_entries_[entry_address.value()] = cache_entry.get();
551
552 // Save the entry.
553 cache_entry->entry()->Store();
554 cache_entry->rankings()->Store();
555 IncreaseNumEntries();
556 entry_count_++;
557
558 // Link this entry through the index.
559 if (parent.get()) {
560 parent->SetNextAddress(entry_address);
561 } else {
562 data_->table[hash & mask_] = entry_address.value();
563 }
564
565 // Link this entry through the lists.
566 eviction_.OnCreateEntry(cache_entry.get());
567
568 CACHE_UMA(AGE_MS, "CreateTime", 0, start);
569 stats_.OnEvent(Stats::CREATE_HIT);
570 SIMPLE_STATS_COUNTER("disk_cache.miss");
571 Trace("create entry hit ");
572 FlushIndex();
573 cache_entry->AddRef();
574 return cache_entry.get();
575 }
576
577 int BackendImplV3::DoomEntry(const std::string& key,
578 const CompletionCallback& callback) {
579 if (disabled_)
580 return net::ERR_FAILED;
581
582 EntryImpl* entry = OpenEntryImpl(key);
583 if (!entry)
584 return net::ERR_FAILED;
585
586 entry->DoomImpl();
587 entry->Release();
588 return net::OK;
589 }
590
591 int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
592 // This is not really an error, but it is an interesting condition.
593 ReportError(ERR_CACHE_DOOMED);
594 stats_.OnEvent(Stats::DOOM_CACHE);
595 if (!num_refs_) {
596 RestartCache(false);
597 return disabled_ ? net::ERR_FAILED : net::OK;
598 } else {
599 if (disabled_)
600 return net::ERR_FAILED;
601
602 eviction_.TrimCache(true);
603 return net::OK;
604 }
605 }
606
607 int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
608 base::Time end_time,
609 const CompletionCallback& callback) {
610 DCHECK_NE(net::APP_CACHE, cache_type_);
611 if (end_time.is_null())
612 return SyncDoomEntriesSince(initial_time);
613
614 DCHECK(end_time >= initial_time);
615
616 if (disabled_)
617 return net::ERR_FAILED;
618
619 EntryImpl* node;
620 void* iter = NULL;
621 EntryImpl* next = OpenNextEntryImpl(&iter);
622 if (!next)
623 return net::OK;
624
625 while (next) {
626 node = next;
627 next = OpenNextEntryImpl(&iter);
628
629 if (node->GetLastUsed() >= initial_time &&
630 node->GetLastUsed() < end_time) {
631 node->DoomImpl();
632 } else if (node->GetLastUsed() < initial_time) {
633 if (next)
634 next->Release();
635 next = NULL;
636 SyncEndEnumeration(iter);
637 }
638
639 node->Release();
640 }
641
642 return net::OK;
643 }
644
645 int BackendImplV3::DoomEntriesSince(base::Time initial_time,
646 const CompletionCallback& callback) {
647 DCHECK_NE(net::APP_CACHE, cache_type_);
648 if (disabled_)
649 return net::ERR_FAILED;
650
651 stats_.OnEvent(Stats::DOOM_RECENT);
652 for (;;) {
653 void* iter = NULL;
654 EntryImpl* entry = OpenNextEntryImpl(&iter);
655 if (!entry)
656 return net::OK;
657
658 if (initial_time > entry->GetLastUsed()) {
659 entry->Release();
660 SyncEndEnumeration(iter);
661 return net::OK;
662 }
663
664 entry->DoomImpl();
665 entry->Release();
666 SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator.
667 }
668 }
669
670 int BackendImplV3::OpenNextEntry(void** iter, Entry** next_entry,
671 const CompletionCallback& callback) {
672 DCHECK(!callback.is_null());
673 background_queue_.OpenNextEntry(iter, next_entry, callback);
674 return net::ERR_IO_PENDING;
675 }
676
677 void BackendImplV3::EndEnumeration(void** iter) {
678 scoped_ptr<IndexIterator> iterator(
679 reinterpret_cast<IndexIterator*>(*iter));
680 *iter = NULL;
681 }
682
683 void BackendImplV3::GetStats(StatsItems* stats) {
684 if (disabled_)
685 return;
686
687 std::pair<std::string, std::string> item;
688
689 item.first = "Entries";
690 item.second = base::StringPrintf("%d", data_->header.num_entries);
691 stats->push_back(item);
692
693 item.first = "Pending IO";
694 item.second = base::StringPrintf("%d", num_pending_io_);
695 stats->push_back(item);
696
697 item.first = "Max size";
698 item.second = base::StringPrintf("%d", max_size_);
699 stats->push_back(item);
700
701 item.first = "Current size";
702 item.second = base::StringPrintf("%d", data_->header.num_bytes);
703 stats->push_back(item);
704
705 item.first = "Cache type";
706 item.second = "Blockfile Cache";
707 stats->push_back(item);
708
709 stats_.GetItems(stats);
710 }
711
712 void BackendImplV3::OnExternalCacheHit(const std::string& key) {
713 if (disabled_)
714 return;
715
716 uint32 hash = base::Hash(key);
717 bool error;
718 EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
719 if (cache_entry) {
720 if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
721 UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
722 }
723 cache_entry->Release();
724 }
725 }
726
727 // ------------------------------------------------------------------------
728
729 // The maximum cache size will be either set explicitly by the caller, or
730 // calculated by this code.
731 void BackendImplV3::AdjustMaxCacheSize(int table_len) {
732 if (max_size_)
733 return;
734
735 // If table_len is provided, the index file exists.
736 DCHECK(!table_len || data_->header.magic);
737
738 // The user is not setting the size, so let's figure it out.
739 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
740 if (available < 0) {
741 max_size_ = kDefaultCacheSize;
742 return;
743 }
744
745 if (table_len)
746 available += data_->header.num_bytes;
747
748 max_size_ = PreferedCacheSize(available);
749
750 // Let's not use more than the default size while we tune up the performance
751 // of bigger caches. TODO(rvargas): remove this limit.
752 if (max_size_ > kDefaultCacheSize * 4)
753 max_size_ = kDefaultCacheSize * 4;
754
755 if (!table_len)
756 return;
757
758 // If we already have a table, adjust the size to it.
759 int current_max_size = MaxStorageSizeForTable(table_len);
760 if (max_size_ > current_max_size)
761 max_size_= current_max_size;
762 }
763
764 bool BackendImplV3::InitStats() {
765 Addr address(data_->header.stats);
766 int size = stats_.StorageSize();
767
768 if (!address.is_initialized()) {
769 FileType file_type = Addr::RequiredFileType(size);
770 DCHECK_NE(file_type, EXTERNAL);
771 int num_blocks = Addr::RequiredBlocks(size, file_type);
772
773 if (!CreateBlock(file_type, num_blocks, &address))
774 return false;
775 return stats_.Init(NULL, 0, address);
776 }
777
778 if (!address.is_block_file()) {
779 NOTREACHED();
780 return false;
781 }
782
783 // Load the required data.
784 size = address.num_blocks() * address.BlockSize();
785 MappedFile* file = File(address);
786 if (!file)
787 return false;
788
789 scoped_ptr<char[]> data(new char[size]);
790 size_t offset = address.start_block() * address.BlockSize() +
791 kBlockHeaderSize;
792 if (!file->Read(data.get(), size, offset))
793 return false;
794
795 if (!stats_.Init(data.get(), size, address))
796 return false;
797 if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
798 stats_.InitSizeHistogram();
799 return true;
800 }
801
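// Serializes the in-memory stats and writes them back to their block-file
// location; write failures are ignored.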
802 void BackendImplV3::StoreStats() {
803 int size = stats_.StorageSize();
804 scoped_ptr<char[]> data(new char[size]);
805 Addr address;
806 size = stats_.SerializeStats(data.get(), size, &address);
807 DCHECK(size);
808 if (!address.is_initialized())
809 return;
810
811 MappedFile* file = File(address);
812 if (!file)
813 return;
814
815 size_t offset = address.start_block() * address.BlockSize() +
816 kBlockHeaderSize;
817 file->Write(data.get(), size, offset); // ignore result.
818 }
819
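// Recreates the cache from scratch while preserving a few persistent
// counters. On failure the old files are left for delayed cleanup instead of
// being deleted right away.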
820 void BackendImplV3::RestartCache(bool failure) {
821 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
822 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
823 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
824 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
825
826 PrepareForRestart();
827 if (failure) {
828 DCHECK(!num_refs_);
829 DCHECK(!open_entries_.size());
830 DelayedCacheCleanup(path_);
831 } else {
832 DeleteCache(path_, false);
833 }
834
835 // Don't call Init() if directed by the unit test: we are simulating a failure
836 // trying to re-enable the cache.
837 if (unit_test_)
838 init_ = true; // Let the destructor do proper cleanup.
839 else if (SyncInit() == net::OK) {
840 stats_.SetCounter(Stats::FATAL_ERROR, errors);
841 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
842 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
843 stats_.SetCounter(Stats::LAST_REPORT, last_report);
844 }
845 }
846
847 void BackendImplV3::PrepareForRestart() {
848 if (!(user_flags_ & EVICTION_V2))
849 lru_eviction_ = true;
850
851 disabled_ = true;
852 data_->header.crash = 0;
853 index_->Flush();
854 index_ = NULL;
855 data_ = NULL;
856 block_files_.CloseFiles();
857 rankings_.Reset();
858 init_ = false;
859 restarted_ = true;
860 }
861
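// Shutdown path: stops eviction and the stats timer, persists stats, clears
// the dirty flag and releases the index and block files.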
862 void BackendImplV3::CleanupCache() {
863 Trace("Backend Cleanup");
864 eviction_.Stop();
865 timer_.reset();
866
867 if (init_) {
868 StoreStats();
869 if (data_)
870 data_->header.crash = 0;
871
872 if (user_flags_ & kNoRandom) {
873 // This is a net_unittest, verify that we are not 'leaking' entries.
874 File::WaitForPendingIO(&num_pending_io_);
875 DCHECK(!num_refs_);
876 } else {
877 File::DropPendingIO();
878 }
879 }
880 block_files_.CloseFiles();
881 FlushIndex();
882 index_ = NULL;
883 ptr_factory_.InvalidateWeakPtrs();
884 done_.Signal();
885 }
886
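// Returns (through |entry|) the cache entry stored at |address|, reusing the
// in-memory instance when the entry is already open, or loading it from disk
// and sanity-checking it. Returns 0 on success or an ERR_* code.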
887 int BackendImplV3::NewEntry(Addr address, EntryImplV3** entry) {
888 EntriesMap::iterator it = open_entries_.find(address.value());
889 if (it != open_entries_.end()) {
890 // Easy job. This entry is already in memory.
891 EntryImpl* this_entry = it->second;
892 this_entry->AddRef();
893 *entry = this_entry;
894 return 0;
895 }
896
897 STRESS_DCHECK(block_files_.IsValid(address));
898
899 if (!address.SanityCheckForEntry()) {
900 LOG(WARNING) << "Wrong entry address.";
901 STRESS_NOTREACHED();
902 return ERR_INVALID_ADDRESS;
903 }
904
905 scoped_refptr<EntryImpl> cache_entry(
906 new EntryImpl(this, address, read_only_));
907 IncreaseNumRefs();
908 *entry = NULL;
909
910 TimeTicks start = TimeTicks::Now();
911 if (!cache_entry->entry()->Load())
912 return ERR_READ_FAILURE;
913
914 if (IsLoaded()) {
915 CACHE_UMA(AGE_MS, "LoadTime", 0, start);
916 }
917
918 if (!cache_entry->SanityCheck()) {
919 LOG(WARNING) << "Messed up entry found.";
920 STRESS_NOTREACHED();
921 return ERR_INVALID_ENTRY;
922 }
923
924 STRESS_DCHECK(block_files_.IsValid(
925 Addr(cache_entry->entry()->Data()->rankings_node)));
926
927 if (!cache_entry->LoadNodeAddress())
928 return ERR_READ_FAILURE;
929
930 if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
931 STRESS_NOTREACHED();
932 cache_entry->SetDirtyFlag(0);
933 // Don't remove this from the list (it is not linked properly). Instead,
934 // break the link back to the entry because it is going away, and leave the
935 // rankings node to be deleted if we find it through a list.
936 rankings_.SetContents(cache_entry->rankings(), 0);
937 } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
938 STRESS_NOTREACHED();
939 cache_entry->SetDirtyFlag(0);
940 rankings_.SetContents(cache_entry->rankings(), address.value());
941 }
942
943 if (!cache_entry->DataSanityCheck()) {
944 LOG(WARNING) << "Messed up entry found.";
945 cache_entry->SetDirtyFlag(0);
946 cache_entry->FixForDelete();
947 }
948
949 // Prevent overwriting the dirty flag on the destructor.
950 cache_entry->SetDirtyFlag(GetCurrentEntryId());
951
952 if (cache_entry->dirty()) {
953 Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
954 address.value());
955 }
956
957 open_entries_[address.value()] = cache_entry.get();
958
959 cache_entry->BeginLogging(net_log_, false);
960 cache_entry.swap(entry);
961 return 0;
962 }
963
964 // This is the actual implementation for OpenNextEntry and OpenPrevEntry.
965 int BackendImplV3::OpenFollowingEntry(bool forward, void** iter,
966 Entry** next_entry,
967 const CompletionCallback& callback) {
968 if (disabled_)
969 return net::ERR_FAILED;
970
971 DCHECK(iter);
972
973 const int kListsToSearch = 3;
974 scoped_refptr<EntryImpl> entries[kListsToSearch];
975 scoped_ptr<Rankings::Iterator> iterator(
976 reinterpret_cast<Rankings::Iterator*>(*iter));
977 *iter = NULL;
978
979 if (!iterator.get()) {
980 iterator.reset(new Rankings::Iterator(&rankings_));
981 bool ret = false;
982
983 // Get an entry from each list.
984 for (int i = 0; i < kListsToSearch; i++) {
985 EntryImpl* temp = NULL;
986 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
987 &iterator->nodes[i], &temp);
988 entries[i].swap(&temp); // The entry was already addref'd.
989 }
990 if (!ret)
991 return NULL;
992 } else {
993 // Get the next entry from the last list, and the actual entries for the
994 // elements on the other lists.
995 for (int i = 0; i < kListsToSearch; i++) {
996 EntryImpl* temp = NULL;
997 if (iterator->list == i) {
998 OpenFollowingEntryFromList(forward, iterator->list,
999 &iterator->nodes[i], &temp);
1000 } else {
1001 temp = GetEnumeratedEntry(iterator->nodes[i],
1002 static_cast<Rankings::List>(i));
1003 }
1004
1005 entries[i].swap(&temp); // The entry was already addref'd.
1006 }
1007 }
1008
1009 int newest = -1;
1010 int oldest = -1;
1011 Time access_times[kListsToSearch];
1012 for (int i = 0; i < kListsToSearch; i++) {
1013 if (entries[i].get()) {
1014 access_times[i] = entries[i]->GetLastUsed();
1015 if (newest < 0) {
1016 DCHECK_LT(oldest, 0);
1017 newest = oldest = i;
1018 continue;
1019 }
1020 if (access_times[i] > access_times[newest])
1021 newest = i;
1022 if (access_times[i] < access_times[oldest])
1023 oldest = i;
1024 }
1025 }
1026
1027 if (newest < 0 || oldest < 0)
1028 return NULL;
1029
1030 EntryImpl* next_entry;
1031 if (forward) {
1032 next_entry = entries[newest].get();
1033 iterator->list = static_cast<Rankings::List>(newest);
1034 } else {
1035 next_entry = entries[oldest].get();
1036 iterator->list = static_cast<Rankings::List>(oldest);
1037 }
1038
1039 *iter = iterator.release();
1040 next_entry->AddRef();
1041 return next_entry;
1042 }
1043
1044 void BackendImplV3::AddStorageSize(int32 bytes) {
1045 data_->header.num_bytes += bytes;
1046 DCHECK_GE(data_->header.num_bytes, 0);
1047 }
1048
1049 void BackendImplV3::SubstractStorageSize(int32 bytes) {
1050 data_->header.num_bytes -= bytes;
1051 DCHECK_GE(data_->header.num_bytes, 0);
1052 }
1053
1054 void BackendImplV3::IncreaseNumRefs() {
1055 num_refs_++;
1056 if (max_refs_ < num_refs_)
1057 max_refs_ = num_refs_;
1058 }
1059
1060 void BackendImplV3::DecreaseNumRefs() {
1061 DCHECK(num_refs_);
1062 num_refs_--;
1063
1064 if (!num_refs_ && disabled_)
1065 base::MessageLoop::current()->PostTask(
1066 FROM_HERE, base::Bind(&BackendImplV3::RestartCache, GetWeakPtr(), true));
1067 }
1068
1069 void BackendImplV3::IncreaseNumEntries() {
1070 index_.header()->num_entries++;
1071 DCHECK_GT(index_.header()->num_entries, 0);
1072 }
1073
1074 void BackendImplV3::DecreaseNumEntries() {
1075 index_.header()->num_entries--;
1076 if (index_.header()->num_entries < 0) {
1077 NOTREACHED();
1078 index_.header()->num_entries = 0;
1079 }
1080 }
1081
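// Performs the actual (synchronous) initialization: opens the backing store,
// starts the stats timer, validates the index and block files, and sets up
// eviction, stats and rankings.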
1082 int BackendImplV3::SyncInit() {
1083 #if defined(NET_BUILD_STRESS_CACHE)
1084 // Start evictions right away.
1085 up_ticks_ = kTrimDelay * 2;
1086 #endif
1087 DCHECK(!init_);
1088 if (init_)
1089 return net::ERR_FAILED;
1090
1091 bool create_files = false;
1092 if (!InitBackingStore(&create_files)) {
1093 ReportError(ERR_STORAGE_ERROR);
1094 return net::ERR_FAILED;
1095 }
1096
1097 num_refs_ = num_pending_io_ = max_refs_ = 0;
1098 entry_count_ = byte_count_ = 0;
1099
1100 if (!restarted_) {
1101 buffer_bytes_ = 0;
1102 trace_object_ = TraceObject::GetTraceObject();
1103 // Create a recurring timer: 30 seconds normally, 1 second in unit tests.
1104 int timer_delay = unit_test_ ? 1000 : 30000;
1105 timer_.reset(new base::RepeatingTimer<BackendImplV3>());
1106 timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
1107 &BackendImplV3::OnTimerTick);
1108 }
1109
1110 init_ = true;
1111 Trace("Init");
1112
1113 if (data_->header.experiment != NO_EXPERIMENT &&
1114 cache_type_ != net::DISK_CACHE) {
1115 // No experiment for other caches.
1116 return net::ERR_FAILED;
1117 }
1118
1119 if (!(user_flags_ & kNoRandom)) {
1120 // The unit test controls directly what to test.
1121 new_eviction_ = (cache_type_ == net::DISK_CACHE);
1122 }
1123
1124 if (!CheckIndex()) {
1125 ReportError(ERR_INIT_FAILED);
1126 return net::ERR_FAILED;
1127 }
1128
1129 if (!restarted_ && (create_files || !data_->header.num_entries))
1130 ReportError(ERR_CACHE_CREATED);
1131
1132 if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
1133 !InitExperiment(&data_->header, create_files)) {
1134 return net::ERR_FAILED;
1135 }
1136
1137 // We don't care if the value overflows. The only thing we care about is that
1138 // the id cannot be zero, because that value is used as "not dirty".
1139 // Increasing the value once per second gives us many years before we start
1140 // having collisions.
1141 data_->header.this_id++;
1142 if (!data_->header.this_id)
1143 data_->header.this_id++;
1144
1145 bool previous_crash = (data_->header.crash != 0);
1146 data_->header.crash = 1;
1147
1148 if (!block_files_.Init(create_files))
1149 return net::ERR_FAILED;
1150
1151 // We want to minimize the changes to the cache for an AppCache.
1152 if (cache_type() == net::APP_CACHE) {
1153 DCHECK(!new_eviction_);
1154 read_only_ = true;
1155 } else if (cache_type() == net::SHADER_CACHE) {
1156 DCHECK(!new_eviction_);
1157 }
1158
1159 eviction_.Init(this);
1160
1161 // stats_ and rankings_ may end up calling back to us so we better be enabled.
1162 disabled_ = false;
1163 if (!InitStats())
1164 return net::ERR_FAILED;
1165
1166 disabled_ = !rankings_.Init(this, new_eviction_);
1167
1168 #if defined(STRESS_CACHE_EXTENDED_VALIDATION)
1169 trace_object_->EnableTracing(false);
1170 int sc = SelfCheck();
1171 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
1172 NOTREACHED();
1173 trace_object_->EnableTracing(true);
1174 #endif
1175
1176 if (previous_crash) {
1177 ReportError(ERR_PREVIOUS_CRASH);
1178 } else if (!restarted_) {
1179 ReportError(ERR_NO_ERROR);
1180 }
1181
1182 FlushIndex();
1183
1184 return disabled_ ? net::ERR_FAILED : net::OK;
1185 }
1186
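// Reuses an entry that was found in the deleted state while creating a new
// one. If the entry is actually in the normal state this is just a create
// miss.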
1187 EntryImpl* BackendImplV3::ResurrectEntry(EntryImpl* deleted_entry) {
1188 if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
1189 deleted_entry->Release();
1190 stats_.OnEvent(Stats::CREATE_MISS);
1191 Trace("create entry miss ");
1192 return NULL;
1193 }
1194
1195 // We are attempting to create an entry and found out that the entry was
1196 // previously deleted.
1197
1198 eviction_.OnCreateEntry(deleted_entry);
1199 entry_count_++;
1200
1201 stats_.OnEvent(Stats::RESURRECT_HIT);
1202 Trace("Resurrect entry hit ");
1203 return deleted_entry;
1204 }
1205
1206 EntryImpl* BackendImplV3::CreateEntryImpl(const std::string& key) {
1207 if (disabled_ || key.empty())
1208 return NULL;
1209
1210 TimeTicks start = TimeTicks::Now();
1211 Trace("Create hash 0x%x", hash);
1212
1213 scoped_refptr<EntryImpl> parent;
1214 Addr entry_address(data_->table[hash & mask_]);
1215 if (entry_address.is_initialized()) {
1216 // We have an entry already. It could be the one we are looking for, or just
1217 // a hash conflict.
1218 bool error;
1219 EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
1220 if (old_entry)
1221 return ResurrectEntry(old_entry);
1222
1223 EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
1224 DCHECK(!error);
1225 if (parent_entry) {
1226 parent.swap(&parent_entry);
1227 } else if (data_->table[hash & mask_]) {
1228 // We should have corrected the problem.
1229 NOTREACHED();
1230 return NULL;
1231 }
1232 }
1233
1234 // The general flow is to allocate disk space and initialize the entry data,
1235 // followed by saving that to disk, then linking the entry through the index
1236 // and finally through the lists. If there is a crash in this process, we may
1237 // end up with:
1238 // a. Used, unreferenced empty blocks on disk (basically just garbage).
1239 // b. Used, unreferenced but meaningful data on disk (more garbage).
1240 // c. A fully formed entry, reachable only through the index.
1241 // d. A fully formed entry, also reachable through the lists, but still dirty.
1242 //
1243 // Anything after (b) can be automatically cleaned up. We may consider saving
1244 // the current operation (as we do while manipulating the lists) so that we
1245 // can detect and cleanup (a) and (b).
1246
1247 int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
1248 if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
1249 LOG(ERROR) << "Create entry failed " << key.c_str();
1250 stats_.OnEvent(Stats::CREATE_ERROR);
1251 return NULL;
1252 }
1253
1254 Addr node_address(0);
1255 if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
1256 block_files_.DeleteBlock(entry_address, false);
1257 LOG(ERROR) << "Create entry failed " << key.c_str();
1258 stats_.OnEvent(Stats::CREATE_ERROR);
1259 return NULL;
1260 }
1261
1262 scoped_refptr<EntryImpl> cache_entry(
1263 new EntryImpl(this, entry_address, false));
1264 IncreaseNumRefs();
1265
1266 if (!cache_entry->CreateEntry(node_address, key, hash)) {
1267 block_files_.DeleteBlock(entry_address, false);
1268 block_files_.DeleteBlock(node_address, false);
1269 LOG(ERROR) << "Create entry failed " << key.c_str();
1270 stats_.OnEvent(Stats::CREATE_ERROR);
1271 return NULL;
1272 }
1273
1274 cache_entry->BeginLogging(net_log_, true);
1275
1276 // We are not failing the operation; let's add this to the map.
1277 open_entries_[entry_address.value()] = cache_entry;
1278
1279 // Save the entry.
1280 cache_entry->entry()->Store();
1281 cache_entry->rankings()->Store();
1282 IncreaseNumEntries();
1283 entry_count_++;
1284
1285 // Link this entry through the index.
1286 if (parent.get()) {
1287 parent->SetNextAddress(entry_address);
1288 } else {
1289 data_->table[hash & mask_] = entry_address.value();
1290 }
1291
1292 // Link this entry through the lists.
1293 eviction_.OnCreateEntry(cache_entry);
1294
1295 CACHE_UMA(AGE_MS, "CreateTime", 0, start);
1296 stats_.OnEvent(Stats::CREATE_HIT);
1297 SIMPLE_STATS_COUNTER("disk_cache.miss");
1298 Trace("create entry hit ");
1299 FlushIndex();
1300 cache_entry->AddRef();
1301 return cache_entry.get();
1302 }
1303
1304 void BackendImplV3::LogStats() {
1305 StatsItems stats;
1306 GetStats(&stats);
1307
1308 for (size_t index = 0; index < stats.size(); index++)
1309 VLOG(1) << stats[index].first << ": " << stats[index].second;
1310 }
1311
1312 void BackendImplV3::ReportStats() {
1313 IndexHeaderV3* header = index_.header();
1314 CACHE_UMA(COUNTS, "Entries", header->num_entries);
1315
1316 int current_size = header->num_bytes / (1024 * 1024);
1317 int max_size = max_size_ / (1024 * 1024);
1318
1319 CACHE_UMA(COUNTS_10000, "Size", current_size);
1320 CACHE_UMA(COUNTS_10000, "MaxSize", max_size);
1321 if (!max_size)
1322 max_size++;
1323 CACHE_UMA(PERCENTAGE, "UsedSpace", current_size * 100 / max_size);
1324
1325 CACHE_UMA(COUNTS_10000, "AverageOpenEntries",
1326 static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
1327 CACHE_UMA(COUNTS_10000, "MaxOpenEntries",
1328 static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
1329 stats_.SetCounter(Stats::MAX_ENTRIES, 0);
1330
1331 CACHE_UMA(COUNTS_10000, "TotalFatalErrors",
1332 static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
1333 CACHE_UMA(COUNTS_10000, "TotalDoomCache",
1334 static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
1335 CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries",
1336 static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
1337 stats_.SetCounter(Stats::FATAL_ERROR, 0);
1338 stats_.SetCounter(Stats::DOOM_CACHE, 0);
1339 stats_.SetCounter(Stats::DOOM_RECENT, 0);
1340
1341 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
1342 if (!(header->flags & CACHE_EVICTED)) {
1343 CACHE_UMA(HOURS, "TotalTimeNotFull", static_cast<int>(total_hours));
1344 return;
1345 }
1346
1347 // This is an up-to-date client that will report FirstEviction() data. After
1348 // that event, start reporting this:
1349
1350 CACHE_UMA(HOURS, "TotalTime", static_cast<int>(total_hours));
1351
1352 int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
1353 stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
1354
1355 // We may see users with no use_hours at this point if this is the first time
1356 // we are running this code.
1357 if (use_hours)
1358 use_hours = total_hours - use_hours;
1359
1360 if (!use_hours || !GetEntryCount() || !header->num_bytes)
1361 return;
1362
1363 CACHE_UMA(HOURS, "UseTime", static_cast<int>(use_hours));
1364
1365 int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
1366 CACHE_UMA(COUNTS, "TrimRate", static_cast<int>(trim_rate));
1367
1368 int avg_size = header->num_bytes / GetEntryCount();
1369 CACHE_UMA(COUNTS, "EntrySize", avg_size);
1370 CACHE_UMA(COUNTS, "EntriesFull", header->num_entries);
1371
1372 int large_entries_bytes = stats_.GetLargeEntriesSize();
1373 int large_ratio = large_entries_bytes * 100 / header->num_bytes;
1374 CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", large_ratio);
1375
1376 if (!lru_eviction_) {
1377 CACHE_UMA(PERCENTAGE, "ResurrectRatio", stats_.GetResurrectRatio());
1378 CACHE_UMA(PERCENTAGE, "NoUseRatio",
1379 header->num_no_use_entries * 100 / header->num_entries);
1380 CACHE_UMA(PERCENTAGE, "LowUseRatio",
1381 header->num_low_use_entries * 100 / header->num_entries);
1382 CACHE_UMA(PERCENTAGE, "HighUseRatio",
1383 header->num_high_use_entries * 100 / header->num_entries);
1384 CACHE_UMA(PERCENTAGE, "DeletedRatio",
1385 header->num_evicted_entries * 100 / header->num_entries);
1386 }
1387
1388 stats_.ResetRatios();
1389 stats_.SetCounter(Stats::TRIM_ENTRY, 0);
1390
1391 if (cache_type_ == net::DISK_CACHE)
1392 block_files_.ReportStats();
1393 }
1394
1395 void BackendImplV3::ReportError(int error) {
1396 STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
1397 error == ERR_CACHE_CREATED);
1398
1399 // We transmit positive numbers, instead of direct error codes.
1400 DCHECK_LE(error, 0);
1401 CACHE_UMA(CACHE_ERROR, "Error", error * -1);
1402 }
1403
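// Validates the index header (magic, version, sizes and table length),
// adjusts the maximum cache size and preloads the table with a single read.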
1404 bool BackendImplV3::CheckIndex() {
1405 DCHECK(data_);
1406
1407 size_t current_size = index_->GetLength();
1408 if (current_size < sizeof(Index)) {
1409 LOG(ERROR) << "Corrupt Index file";
1410 return false;
1411 }
1412
1413 if (new_eviction_) {
1414 // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
1415 if (kIndexMagic != data_->header.magic ||
1416 kCurrentVersion >> 16 != data_->header.version >> 16) {
1417 LOG(ERROR) << "Invalid file version or magic";
1418 return false;
1419 }
1420 if (kCurrentVersion == data_->header.version) {
1421 // We need file version 2.1 for the new eviction algorithm.
1422 UpgradeTo2_1();
1423 }
1424 } else {
1425 if (kIndexMagic != data_->header.magic ||
1426 kCurrentVersion != data_->header.version) {
1427 LOG(ERROR) << "Invalid file version or magic";
1428 return false;
1429 }
1430 }
1431
1432 if (!data_->header.table_len) {
1433 LOG(ERROR) << "Invalid table size";
1434 return false;
1435 }
1436
1437 if (current_size < GetIndexSize(data_->header.table_len) ||
1438 data_->header.table_len & (kBaseTableLen - 1)) {
1439 LOG(ERROR) << "Corrupt Index file";
1440 return false;
1441 }
1442
1443 AdjustMaxCacheSize(data_->header.table_len);
1444
1445 #if !defined(NET_BUILD_STRESS_CACHE)
1446 if (data_->header.num_bytes < 0 ||
1447 (max_size_ < kint32max - kDefaultCacheSize &&
1448 data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
1449 LOG(ERROR) << "Invalid cache (current) size";
1450 return false;
1451 }
1452 #endif
1453
1454 if (data_->header.num_entries < 0) {
1455 LOG(ERROR) << "Invalid number of entries";
1456 return false;
1457 }
1458
1459 if (!mask_)
1460 mask_ = data_->header.table_len - 1;
1461
1462 // Load the table into memory with a single read.
1463 scoped_ptr<char[]> buf(new char[current_size]);
1464 return index_->Read(buf.get(), current_size, 0);
1465 }
1466
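// Walks every hash bucket verifying each entry; returns the number of dirty
// entries found, or a negative error code.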
1467 int BackendImplV3::CheckAllEntries() {
1468 int num_dirty = 0;
1469 int num_entries = 0;
1470 DCHECK(mask_ < kuint32max);
1471 for (unsigned int i = 0; i <= mask_; i++) {
1472 Addr address(data_->table[i]);
1473 if (!address.is_initialized())
1474 continue;
1475 for (;;) {
1476 EntryImpl* tmp;
1477 int ret = NewEntry(address, &tmp);
1478 if (ret) {
1479 STRESS_NOTREACHED();
1480 return ret;
1481 }
1482 scoped_refptr<EntryImpl> cache_entry;
1483 cache_entry.swap(&tmp);
1484
1485 if (cache_entry->dirty())
1486 num_dirty++;
1487 else if (CheckEntry(cache_entry.get()))
1488 num_entries++;
1489 else
1490 return ERR_INVALID_ENTRY;
1491
1492 DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
1493 address.set_value(cache_entry->GetNextAddress());
1494 if (!address.is_initialized())
1495 break;
1496 }
1497 }
1498
1499 Trace("CheckAllEntries End");
1500 if (num_entries + num_dirty != data_->header.num_entries) {
1501 LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
1502 " " << data_->header.num_entries;
1503 DCHECK_LT(num_entries, data_->header.num_entries);
1504 return ERR_NUM_ENTRIES_MISMATCH;
1505 }
1506
1507 return num_dirty;
1508 }
1509
1510 bool BackendImplV3::CheckEntry(EntryImpl* cache_entry) {
1511 bool ok = block_files_.IsValid(cache_entry->entry()->address());
1512 ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
1513 EntryStore* data = cache_entry->entry()->Data();
1514 for (size_t i = 0; i < arraysize(data->data_addr); i++) {
1515 if (data->data_addr[i]) {
1516 Addr address(data->data_addr[i]);
1517 if (address.is_block_file())
1518 ok = ok && block_files_.IsValid(address);
1519 }
1520 }
1521
1522 return ok && cache_entry->rankings()->VerifyHash();
1523 }
1524
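// Total in-memory user buffers are capped at 2% of physical memory, and never
// more than 30 MB.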
1525 int BackendImplV3::MaxBuffersSize() {
1526 static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
1527 static bool done = false;
1528
1529 if (!done) {
1530 const int kMaxBuffersSize = 30 * 1024 * 1024;
1531
1532 // We want to use up to 2% of the computer's memory.
1533 total_memory = total_memory * 2 / 100;
1534 if (total_memory > kMaxBuffersSize || total_memory <= 0)
1535 total_memory = kMaxBuffersSize;
1536
1537 done = true;
1538 }
1539
1540 return static_cast<int>(total_memory);
1541 }
1542
1543 #endif // defined(V3_NOT_JUST_YET_READY).
1544
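// Until the V3 backend is ready (see V3_NOT_JUST_YET_READY above), the entry
// points below are minimal stubs that report failure or do nothing.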
1545 bool BackendImplV3::IsAllocAllowed(int current_size, int new_size) {
1546 return false;
1547 }
1548
1549 net::CacheType BackendImplV3::GetCacheType() const {
1550 return cache_type_;
1551 }
1552
1553 int32 BackendImplV3::GetEntryCount() const {
1554 return 0;
1555 }
1556
1557 int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
1558 const CompletionCallback& callback) {
1559 return net::ERR_FAILED;
1560 }
1561
1562 int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
1563 const CompletionCallback& callback) {
1564 return net::ERR_FAILED;
1565 }
1566
1567 int BackendImplV3::DoomEntry(const std::string& key,
1568 const CompletionCallback& callback) {
1569 return net::ERR_FAILED;
1570 }
1571
1572 int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
1573 return net::ERR_FAILED;
1574 }
1575
1576 int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
1577 base::Time end_time,
1578 const CompletionCallback& callback) {
1579 return net::ERR_FAILED;
1580 }
1581
1582 int BackendImplV3::DoomEntriesSince(base::Time initial_time,
1583 const CompletionCallback& callback) {
1584 return net::ERR_FAILED;
1585 }
1586
1587 int BackendImplV3::OpenNextEntry(void** iter, Entry** next_entry,
1588 const CompletionCallback& callback) {
1589 return net::ERR_FAILED;
1590 }
1591
1592 void BackendImplV3::EndEnumeration(void** iter) {
1593 NOTIMPLEMENTED();
1594 }
1595
1596 void BackendImplV3::GetStats(StatsItems* stats) {
1597 NOTIMPLEMENTED();
1598 }
1599
1600 void BackendImplV3::OnExternalCacheHit(const std::string& key) {
1601 NOTIMPLEMENTED();
1602 }
1603
1604 void BackendImplV3::CleanupCache() {
1605 }
1606
1607 } // namespace disk_cache
1608