1 // Copyright (c) 2010 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/disk_cache/backend_impl.h"
6
7 #include "base/file_path.h"
8 #include "base/file_util.h"
9 #include "base/message_loop.h"
10 #include "base/metrics/field_trial.h"
11 #include "base/metrics/histogram.h"
12 #include "base/metrics/stats_counters.h"
13 #include "base/rand_util.h"
14 #include "base/string_util.h"
15 #include "base/stringprintf.h"
16 #include "base/sys_info.h"
17 #include "base/threading/worker_pool.h"
18 #include "base/threading/thread_restrictions.h"
19 #include "base/time.h"
20 #include "base/timer.h"
21 #include "net/base/net_errors.h"
22 #include "net/disk_cache/cache_util.h"
23 #include "net/disk_cache/entry_impl.h"
24 #include "net/disk_cache/errors.h"
25 #include "net/disk_cache/experiments.h"
26 #include "net/disk_cache/file.h"
27 #include "net/disk_cache/hash.h"
28 #include "net/disk_cache/mem_backend_impl.h"
29
30 // This has to be defined before including histogram_macros.h from this file.
31 #define NET_DISK_CACHE_BACKEND_IMPL_CC_
32 #include "net/disk_cache/histogram_macros.h"
33
34 using base::Time;
35 using base::TimeDelta;
36 using base::TimeTicks;
37
38 namespace {
39
40 const char* kIndexName = "index";
41 const int kMaxOldFolders = 100;
42
43 // It seems that ~240 MB corresponds to fewer than 50k entries for 99% of users.
44 // Note that the actual target is to keep the index table load factor under 55%
45 // for most users.
46 const int k64kEntriesStore = 240 * 1000 * 1000;
47 const int kBaseTableLen = 64 * 1024;
48 const int kDefaultCacheSize = 80 * 1024 * 1024;
49
50 int DesiredIndexTableLen(int32 storage_size) {
51 if (storage_size <= k64kEntriesStore)
52 return kBaseTableLen;
53 if (storage_size <= k64kEntriesStore * 2)
54 return kBaseTableLen * 2;
55 if (storage_size <= k64kEntriesStore * 4)
56 return kBaseTableLen * 4;
57 if (storage_size <= k64kEntriesStore * 8)
58 return kBaseTableLen * 8;
59
60 // The biggest storage_size for int32 requires a 4 MB table.
61 return kBaseTableLen * 16;
62 }
63
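// Inverse of the mapping above: returns the largest storage size that a table
// of table_len buckets is meant to serve (roughly 240 MB per 64k buckets).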
64 int MaxStorageSizeForTable(int table_len) {
65 return table_len * (k64kEntriesStore / kBaseTableLen);
66 }
67
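// Size of the index file: one CacheAddr per hash bucket plus the fixed header.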
68 size_t GetIndexSize(int table_len) {
69 size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
70 return sizeof(disk_cache::IndexHeader) + table_size;
71 }
72
73 // ------------------------------------------------------------------------
74
75 // Returns a fully qualified name from path and name, using a given name prefix
76 // and index number. For instance, if the arguments are "/foo", "bar" and 5, it
77 // will return "/foo/old_bar_005".
78 FilePath GetPrefixedName(const FilePath& path, const std::string& name,
79 int index) {
80 std::string tmp = base::StringPrintf("%s%s_%03d", "old_",
81 name.c_str(), index);
82 return path.AppendASCII(tmp);
83 }
84
85 // This is a simple Task to clean up old caches.
86 class CleanupTask : public Task {
87 public:
88 CleanupTask(const FilePath& path, const std::string& name)
89 : path_(path), name_(name) {}
90
91 virtual void Run();
92
93 private:
94 FilePath path_;
95 std::string name_;
96 DISALLOW_COPY_AND_ASSIGN(CleanupTask);
97 };
98
99 void CleanupTask::Run() {
100 for (int i = 0; i < kMaxOldFolders; i++) {
101 FilePath to_delete = GetPrefixedName(path_, name_, i);
102 disk_cache::DeleteCache(to_delete, true);
103 }
104 }
105
106 // Returns a full path to rename the current cache, in order to delete it. path
107 // is the current folder location, and name is the current folder name.
108 FilePath GetTempCacheName(const FilePath& path, const std::string& name) {
109 // We'll attempt to have up to kMaxOldFolders folders for deletion.
110 for (int i = 0; i < kMaxOldFolders; i++) {
111 FilePath to_delete = GetPrefixedName(path, name, i);
112 if (!file_util::PathExists(to_delete))
113 return to_delete;
114 }
115 return FilePath();
116 }
117
118 // Moves the cache files to a new folder and creates a task to delete them.
119 bool DelayedCacheCleanup(const FilePath& full_path) {
120 // GetTempCacheName() and MoveCache() use synchronous file
121 // operations.
122 base::ThreadRestrictions::ScopedAllowIO allow_io;
123
124 FilePath current_path = full_path.StripTrailingSeparators();
125
126 FilePath path = current_path.DirName();
127 FilePath name = current_path.BaseName();
128 #if defined(OS_POSIX)
129 std::string name_str = name.value();
130 #elif defined(OS_WIN)
131 // We created this file so it should only contain ASCII.
132 std::string name_str = WideToASCII(name.value());
133 #endif
134
135 FilePath to_delete = GetTempCacheName(path, name_str);
136 if (to_delete.empty()) {
137 LOG(ERROR) << "Unable to get another cache folder";
138 return false;
139 }
140
141 if (!disk_cache::MoveCache(full_path, to_delete)) {
142 LOG(ERROR) << "Unable to move cache folder";
143 return false;
144 }
145
146 base::WorkerPool::PostTask(FROM_HERE, new CleanupTask(path, name_str), true);
147 return true;
148 }
149
150 // Sets group for the current experiment. Returns false if the files should be
151 // discarded.
152 bool InitExperiment(disk_cache::IndexHeader* header) {
153 if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
154 header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
155 // Discard current cache.
156 return false;
157 }
158
159 // See if we already defined the group for this profile.
160 if (header->experiment >= disk_cache::EXPERIMENT_DELETED_LIST_OUT)
161 return true;
162
163 // The experiment is closed.
164 header->experiment = disk_cache::EXPERIMENT_DELETED_LIST_OUT;
165 return true;
166 }
167
168 // Initializes the field trial structures to allow performance measurements
169 // for the current cache configuration.
170 void SetFieldTrialInfo(int size_group) {
171 static bool first = true;
172 if (!first)
173 return;
174
175 // Field trials involve static objects so we have to do this only once.
176 first = false;
177 std::string group1 = base::StringPrintf("CacheSizeGroup_%d", size_group);
178 int totalProbability = 10;
179 scoped_refptr<base::FieldTrial> trial1(
180 new base::FieldTrial("CacheSize", totalProbability, group1, 2011, 6, 30));
181 trial1->AppendGroup(group1, totalProbability);
182 }
183
184 // ------------------------------------------------------------------------
185
186 // This class takes care of building an instance of the backend.
187 class CacheCreator {
188 public:
189 CacheCreator(const FilePath& path, bool force, int max_bytes,
190 net::CacheType type, uint32 flags,
191 base::MessageLoopProxy* thread, net::NetLog* net_log,
192 disk_cache::Backend** backend,
193 net::CompletionCallback* callback)
194 : path_(path), force_(force), retry_(false), max_bytes_(max_bytes),
195 type_(type), flags_(flags), thread_(thread), backend_(backend),
196 callback_(callback), cache_(NULL), net_log_(net_log),
197 ALLOW_THIS_IN_INITIALIZER_LIST(
198 my_callback_(this, &CacheCreator::OnIOComplete)) {
199 }
200 ~CacheCreator() {}
201
202 // Creates the backend.
203 int Run();
204
205 // Callback implementation.
206 void OnIOComplete(int result);
207
208 private:
209 void DoCallback(int result);
210
211 const FilePath& path_;
212 bool force_;
213 bool retry_;
214 int max_bytes_;
215 net::CacheType type_;
216 uint32 flags_;
217 scoped_refptr<base::MessageLoopProxy> thread_;
218 disk_cache::Backend** backend_;
219 net::CompletionCallback* callback_;
220 disk_cache::BackendImpl* cache_;
221 net::NetLog* net_log_;
222 net::CompletionCallbackImpl<CacheCreator> my_callback_;
223
224 DISALLOW_COPY_AND_ASSIGN(CacheCreator);
225 };
226
227 int CacheCreator::Run() {
228 cache_ = new disk_cache::BackendImpl(path_, thread_, net_log_);
229 cache_->SetMaxSize(max_bytes_);
230 cache_->SetType(type_);
231 cache_->SetFlags(flags_);
232 int rv = cache_->Init(&my_callback_);
233 DCHECK_EQ(net::ERR_IO_PENDING, rv);
234 return rv;
235 }
236
237 void CacheCreator::OnIOComplete(int result) {
238 if (result == net::OK || !force_ || retry_)
239 return DoCallback(result);
240
241 // This is a failure and we are supposed to try again, so delete the object,
242 // delete all the files, and try again.
243 retry_ = true;
244 delete cache_;
245 cache_ = NULL;
246 if (!DelayedCacheCleanup(path_))
247 return DoCallback(result);
248
249 // The worker thread will start deleting files soon, but the original folder
250 // is not there anymore... let's create a new set of files.
251 int rv = Run();
252 DCHECK_EQ(net::ERR_IO_PENDING, rv);
253 }
254
255 void CacheCreator::DoCallback(int result) {
256 DCHECK_NE(net::ERR_IO_PENDING, result);
257 if (result == net::OK) {
258 *backend_ = cache_;
259 } else {
260 LOG(ERROR) << "Unable to create cache";
261 *backend_ = NULL;
262 delete cache_;
263 }
264 callback_->Run(result);
265 delete this;
266 }
267
268 // ------------------------------------------------------------------------
269
270 // A task to perform final cleanup on the background thread.
271 class FinalCleanup : public Task {
272 public:
273 explicit FinalCleanup(disk_cache::BackendImpl* backend) : backend_(backend) {}
274 ~FinalCleanup() {}
275
276 virtual void Run();
277 private:
278 disk_cache::BackendImpl* backend_;
279 DISALLOW_EVIL_CONSTRUCTORS(FinalCleanup);
280 };
281
282 void FinalCleanup::Run() {
283 backend_->CleanupCache();
284 }
285
286 } // namespace
287
288 // ------------------------------------------------------------------------
289
290 namespace disk_cache {
291
292 int CreateCacheBackend(net::CacheType type, const FilePath& path, int max_bytes,
293 bool force, base::MessageLoopProxy* thread,
294 net::NetLog* net_log, Backend** backend,
295 CompletionCallback* callback) {
296 DCHECK(callback);
297 if (type == net::MEMORY_CACHE) {
298 *backend = MemBackendImpl::CreateBackend(max_bytes, net_log);
299 return *backend ? net::OK : net::ERR_FAILED;
300 }
301 DCHECK(thread);
302
303 return BackendImpl::CreateBackend(path, force, max_bytes, type, kNone, thread,
304 net_log, backend, callback);
305 }
306
307 // Returns the preferred maximum number of bytes for the cache given the
308 // number of available bytes.
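// For example, with the 80 MB default: 50 MB of free space yields 40 MB (80%),
// 1 GB of free space yields ~100 MB (10%), and 10 GB of free space yields
// 200 MB (2.5x the default).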
309 int PreferedCacheSize(int64 available) {
310 // Return 80% of the available space if there is not enough space to use
311 // kDefaultCacheSize.
312 if (available < kDefaultCacheSize * 10 / 8)
313 return static_cast<int32>(available * 8 / 10);
314
315 // Return kDefaultCacheSize if it would use between 10% and 80% of the available space.
316 if (available < kDefaultCacheSize * 10)
317 return kDefaultCacheSize;
318
319 // Return 10% of the available space if the target size
320 // (2.5 * kDefaultCacheSize) is more than 10%.
321 if (available < static_cast<int64>(kDefaultCacheSize) * 25)
322 return static_cast<int32>(available / 10);
323
324 // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
325 // of the available space.
326 if (available < static_cast<int64>(kDefaultCacheSize) * 250)
327 return kDefaultCacheSize * 5 / 2;
328
329 // Return 1% of the available space if it does not exceed kint32max.
330 if (available < static_cast<int64>(kint32max) * 100)
331 return static_cast<int32>(available / 100);
332
333 return kint32max;
334 }
335
336 // ------------------------------------------------------------------------
337
338 BackendImpl::BackendImpl(const FilePath& path,
339 base::MessageLoopProxy* cache_thread,
340 net::NetLog* net_log)
341 : ALLOW_THIS_IN_INITIALIZER_LIST(background_queue_(this, cache_thread)),
342 path_(path),
343 block_files_(path),
344 mask_(0),
345 max_size_(0),
346 io_delay_(0),
347 cache_type_(net::DISK_CACHE),
348 uma_report_(0),
349 user_flags_(0),
350 init_(false),
351 restarted_(false),
352 unit_test_(false),
353 read_only_(false),
354 disabled_(false),
355 new_eviction_(false),
356 first_timer_(true),
357 net_log_(net_log),
358 done_(true, false),
359 ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)),
360 ALLOW_THIS_IN_INITIALIZER_LIST(ptr_factory_(this)) {
361 }
362
363 BackendImpl::BackendImpl(const FilePath& path,
364 uint32 mask,
365 base::MessageLoopProxy* cache_thread,
366 net::NetLog* net_log)
367 : ALLOW_THIS_IN_INITIALIZER_LIST(background_queue_(this, cache_thread)),
368 path_(path),
369 block_files_(path),
370 mask_(mask),
371 max_size_(0),
372 io_delay_(0),
373 cache_type_(net::DISK_CACHE),
374 uma_report_(0),
375 user_flags_(kMask),
376 init_(false),
377 restarted_(false),
378 unit_test_(false),
379 read_only_(false),
380 disabled_(false),
381 new_eviction_(false),
382 first_timer_(true),
383 net_log_(net_log),
384 done_(true, false),
385 ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)),
386 ALLOW_THIS_IN_INITIALIZER_LIST(ptr_factory_(this)) {
387 }
388
389 BackendImpl::~BackendImpl() {
390 background_queue_.WaitForPendingIO();
391
392 if (background_queue_.BackgroundIsCurrentThread()) {
393 // Unit tests may use the same thread for everything.
394 CleanupCache();
395 } else {
396 background_queue_.background_thread()->PostTask(FROM_HERE,
397 new FinalCleanup(this));
398 done_.Wait();
399 }
400 }
401
402 // If the initialization of the cache fails, and force is true, we will discard
403 // the whole cache and create a new one. In order to process a potentially large
404 // number of files, we'll rename the cache folder to old_ + original_name +
405 // number (located in the same parent folder), and spawn a worker thread to
406 // delete all the files in all the stale cache folders. The whole process can
407 // still fail if we are not able to rename the cache folder (for instance due to
408 // a sharing violation), and in that case a cache for this profile (on the
409 // desired path) cannot be created.
410 //
411 // Static.
412 int BackendImpl::CreateBackend(const FilePath& full_path, bool force,
413 int max_bytes, net::CacheType type,
414 uint32 flags, base::MessageLoopProxy* thread,
415 net::NetLog* net_log, Backend** backend,
416 CompletionCallback* callback) {
417 DCHECK(callback);
418 CacheCreator* creator = new CacheCreator(full_path, force, max_bytes, type,
419 flags, thread, net_log, backend,
420 callback);
421 // This object will destroy itself when finished.
422 return creator->Run();
423 }
424
425 int BackendImpl::Init(CompletionCallback* callback) {
426 background_queue_.Init(callback);
427 return net::ERR_IO_PENDING;
428 }
429
430 int BackendImpl::SyncInit() {
431 DCHECK(!init_);
432 if (init_)
433 return net::ERR_FAILED;
434
435 bool create_files = false;
436 if (!InitBackingStore(&create_files)) {
437 ReportError(ERR_STORAGE_ERROR);
438 return net::ERR_FAILED;
439 }
440
441 num_refs_ = num_pending_io_ = max_refs_ = 0;
442 entry_count_ = byte_count_ = 0;
443
444 if (!restarted_) {
445 buffer_bytes_ = 0;
446 trace_object_ = TraceObject::GetTraceObject();
447 // Create a recurring timer that fires every 30 seconds (every second in unit tests).
448 int timer_delay = unit_test_ ? 1000 : 30000;
449 timer_.Start(TimeDelta::FromMilliseconds(timer_delay), this,
450 &BackendImpl::OnStatsTimer);
451 }
452
453 init_ = true;
454
455 if (data_->header.experiment != NO_EXPERIMENT &&
456 cache_type_ != net::DISK_CACHE) {
457 // No experiment for other caches.
458 return net::ERR_FAILED;
459 }
460
461 if (!(user_flags_ & disk_cache::kNoRandom)) {
462 // The unit test controls directly what to test.
463 new_eviction_ = (cache_type_ == net::DISK_CACHE);
464 }
465
466 if (!CheckIndex()) {
467 ReportError(ERR_INIT_FAILED);
468 return net::ERR_FAILED;
469 }
470
471 if (!(user_flags_ & disk_cache::kNoRandom) &&
472 cache_type_ == net::DISK_CACHE &&
473 !InitExperiment(&data_->header))
474 return net::ERR_FAILED;
475
476 // We don't care if the value overflows. The only thing we care about is that
477 // the id cannot be zero, because that value is used as "not dirty".
478 // Increasing the value once per second gives us many years before we start
479 // having collisions.
480 data_->header.this_id++;
481 if (!data_->header.this_id)
482 data_->header.this_id++;
483
484 if (data_->header.crash) {
485 ReportError(ERR_PREVIOUS_CRASH);
486 } else {
487 ReportError(0);
488 data_->header.crash = 1;
489 }
490
491 if (!block_files_.Init(create_files))
492 return net::ERR_FAILED;
493
494 // We want to minimize the changes to cache for an AppCache.
495 if (cache_type() == net::APP_CACHE) {
496 DCHECK(!new_eviction_);
497 read_only_ = true;
498 }
499
500 // Set up load-time data only for the main cache.
501 if (cache_type() == net::DISK_CACHE)
502 SetFieldTrialInfo(GetSizeGroup());
503
504 eviction_.Init(this);
505
506 // stats_ and rankings_ may end up calling back to us so we better be enabled.
507 disabled_ = false;
508 if (!stats_.Init(this, &data_->header.stats))
509 return net::ERR_FAILED;
510
511 disabled_ = !rankings_.Init(this, new_eviction_);
512
513 return disabled_ ? net::ERR_FAILED : net::OK;
514 }
515
516 void BackendImpl::CleanupCache() {
517 Trace("Backend Cleanup");
518 eviction_.Stop();
519 timer_.Stop();
520
521 if (init_) {
522 stats_.Store();
523 if (data_)
524 data_->header.crash = 0;
525
526 File::WaitForPendingIO(&num_pending_io_);
527 if (user_flags_ & kNoRandom) {
528 // This is a net_unittest, verify that we are not 'leaking' entries.
529 DCHECK(!num_refs_);
530 }
531 }
532 block_files_.CloseFiles();
533 factory_.RevokeAll();
534 ptr_factory_.InvalidateWeakPtrs();
535 done_.Signal();
536 }
537
538 // ------------------------------------------------------------------------
539
540 int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
541 CompletionCallback* callback) {
542 DCHECK(callback);
543 background_queue_.OpenPrevEntry(iter, prev_entry, callback);
544 return net::ERR_IO_PENDING;
545 }
546
547 int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
548 DCHECK(entry);
549 *entry = OpenEntryImpl(key);
550 return (*entry) ? net::OK : net::ERR_FAILED;
551 }
552
553 int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
554 DCHECK(entry);
555 *entry = CreateEntryImpl(key);
556 return (*entry) ? net::OK : net::ERR_FAILED;
557 }
558
559 int BackendImpl::SyncDoomEntry(const std::string& key) {
560 if (disabled_)
561 return net::ERR_FAILED;
562
563 EntryImpl* entry = OpenEntryImpl(key);
564 if (!entry)
565 return net::ERR_FAILED;
566
567 entry->DoomImpl();
568 entry->Release();
569 return net::OK;
570 }
571
572 int BackendImpl::SyncDoomAllEntries() {
573 // This is not really an error, but it is an interesting condition.
574 ReportError(ERR_CACHE_DOOMED);
575 stats_.OnEvent(Stats::DOOM_CACHE);
576 if (!num_refs_) {
577 RestartCache(false);
578 return disabled_ ? net::ERR_FAILED : net::OK;
579 } else {
580 if (disabled_)
581 return net::ERR_FAILED;
582
583 eviction_.TrimCache(true);
584 return net::OK;
585 }
586 }
587
588 int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
589 const base::Time end_time) {
590 DCHECK_NE(net::APP_CACHE, cache_type_);
591 if (end_time.is_null())
592 return SyncDoomEntriesSince(initial_time);
593
594 DCHECK(end_time >= initial_time);
595
596 if (disabled_)
597 return net::ERR_FAILED;
598
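// Fetch the entry that follows the current one before dooming it, so the
// enumeration never has to advance from an already doomed entry.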
599 EntryImpl* node;
600 void* iter = NULL;
601 EntryImpl* next = OpenNextEntryImpl(&iter);
602 if (!next)
603 return net::OK;
604
605 while (next) {
606 node = next;
607 next = OpenNextEntryImpl(&iter);
608
609 if (node->GetLastUsed() >= initial_time &&
610 node->GetLastUsed() < end_time) {
611 node->DoomImpl();
612 } else if (node->GetLastUsed() < initial_time) {
613 if (next)
614 next->Release();
615 next = NULL;
616 SyncEndEnumeration(iter);
617 }
618
619 node->Release();
620 }
621
622 return net::OK;
623 }
624
625 // We use OpenNextEntryImpl to retrieve elements from the cache, until we get
626 // entries that are too old.
627 int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
628 DCHECK_NE(net::APP_CACHE, cache_type_);
629 if (disabled_)
630 return net::ERR_FAILED;
631
632 stats_.OnEvent(Stats::DOOM_RECENT);
633 for (;;) {
634 void* iter = NULL;
635 EntryImpl* entry = OpenNextEntryImpl(&iter);
636 if (!entry)
637 return net::OK;
638
639 if (initial_time > entry->GetLastUsed()) {
640 entry->Release();
641 SyncEndEnumeration(iter);
642 return net::OK;
643 }
644
645 entry->DoomImpl();
646 entry->Release();
647 SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator.
648 }
649 }
650
651 int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
652 *next_entry = OpenNextEntryImpl(iter);
653 return (*next_entry) ? net::OK : net::ERR_FAILED;
654 }
655
656 int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
657 *prev_entry = OpenPrevEntryImpl(iter);
658 return (*prev_entry) ? net::OK : net::ERR_FAILED;
659 }
660
661 void BackendImpl::SyncEndEnumeration(void* iter) {
662 scoped_ptr<Rankings::Iterator> iterator(
663 reinterpret_cast<Rankings::Iterator*>(iter));
664 }
665
666 EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
667 if (disabled_)
668 return NULL;
669
670 TimeTicks start = TimeTicks::Now();
671 uint32 hash = Hash(key);
672 Trace("Open hash 0x%x", hash);
673
674 bool error;
675 EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
676 if (!cache_entry) {
677 stats_.OnEvent(Stats::OPEN_MISS);
678 return NULL;
679 }
680
681 if (ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
682 // The entry was already evicted.
683 cache_entry->Release();
684 stats_.OnEvent(Stats::OPEN_MISS);
685 return NULL;
686 }
687
688 eviction_.OnOpenEntry(cache_entry);
689 entry_count_++;
690
691 CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start);
692 stats_.OnEvent(Stats::OPEN_HIT);
693 SIMPLE_STATS_COUNTER("disk_cache.hit");
694 return cache_entry;
695 }
696
697 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
698 if (disabled_ || key.empty())
699 return NULL;
700
701 TimeTicks start = TimeTicks::Now();
702 uint32 hash = Hash(key);
703 Trace("Create hash 0x%x", hash);
704
705 scoped_refptr<EntryImpl> parent;
706 Addr entry_address(data_->table[hash & mask_]);
707 if (entry_address.is_initialized()) {
708 // We have an entry already. It could be the one we are looking for, or just
709 // a hash conflict.
710 bool error;
711 EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
712 if (old_entry)
713 return ResurrectEntry(old_entry);
714
715 EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
716 DCHECK(!error);
717 if (parent_entry) {
718 parent.swap(&parent_entry);
719 } else if (data_->table[hash & mask_]) {
720 // We should have corrected the problem.
721 NOTREACHED();
722 return NULL;
723 }
724 }
725
726 // The general flow is to allocate disk space and initialize the entry data,
727 // followed by saving that to disk, then linking the entry through the index
728 // and finally through the lists. If there is a crash in this process, we may
729 // end up with:
730 // a. Used, unreferenced empty blocks on disk (basically just garbage).
731 // b. Used, unreferenced but meaningful data on disk (more garbage).
732 // c. A fully formed entry, reachable only through the index.
733 // d. A fully formed entry, also reachable through the lists, but still dirty.
734 //
735 // Anything after (b) can be automatically cleaned up. We may consider saving
736 // the current operation (as we do while manipulating the lists) so that we
737 // can detect and cleanup (a) and (b).
738
739 int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
740 if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
741 LOG(ERROR) << "Create entry failed " << key.c_str();
742 stats_.OnEvent(Stats::CREATE_ERROR);
743 return NULL;
744 }
745
746 Addr node_address(0);
747 if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
748 block_files_.DeleteBlock(entry_address, false);
749 LOG(ERROR) << "Create entry failed " << key.c_str();
750 stats_.OnEvent(Stats::CREATE_ERROR);
751 return NULL;
752 }
753
754 scoped_refptr<EntryImpl> cache_entry(
755 new EntryImpl(this, entry_address, false));
756 IncreaseNumRefs();
757
758 if (!cache_entry->CreateEntry(node_address, key, hash)) {
759 block_files_.DeleteBlock(entry_address, false);
760 block_files_.DeleteBlock(node_address, false);
761 LOG(ERROR) << "Create entry failed " << key.c_str();
762 stats_.OnEvent(Stats::CREATE_ERROR);
763 return NULL;
764 }
765
766 cache_entry->BeginLogging(net_log_, true);
767
768 // We are not failing the operation; let's add this to the map.
769 open_entries_[entry_address.value()] = cache_entry;
770
771 // Save the entry.
772 block_files_.GetFile(entry_address)->Store(cache_entry->entry());
773 block_files_.GetFile(node_address)->Store(cache_entry->rankings());
774 IncreaseNumEntries();
775 entry_count_++;
776
777 // Link this entry through the index.
778 if (parent.get()) {
779 parent->SetNextAddress(entry_address);
780 } else {
781 data_->table[hash & mask_] = entry_address.value();
782 }
783
784 // Link this entry through the lists.
785 eviction_.OnCreateEntry(cache_entry);
786
787 CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
788 stats_.OnEvent(Stats::CREATE_HIT);
789 SIMPLE_STATS_COUNTER("disk_cache.miss");
790 Trace("create entry hit ");
791 return cache_entry.release();
792 }
793
794 EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
795 return OpenFollowingEntry(true, iter);
796 }
797
798 EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
799 return OpenFollowingEntry(false, iter);
800 }
801
802 bool BackendImpl::SetMaxSize(int max_bytes) {
803 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
804 if (max_bytes < 0)
805 return false;
806
807 // Zero size means use the default.
808 if (!max_bytes)
809 return true;
810
811 // Avoid a DCHECK later on.
812 if (max_bytes >= kint32max - kint32max / 10)
813 max_bytes = kint32max - kint32max / 10 - 1;
814
815 user_flags_ |= kMaxSize;
816 max_size_ = max_bytes;
817 return true;
818 }
819
820 void BackendImpl::SetType(net::CacheType type) {
821 DCHECK(type != net::MEMORY_CACHE);
822 cache_type_ = type;
823 }
824
825 FilePath BackendImpl::GetFileName(Addr address) const {
826 if (!address.is_separate_file() || !address.is_initialized()) {
827 NOTREACHED();
828 return FilePath();
829 }
830
831 std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
832 return path_.AppendASCII(tmp);
833 }
834
835 MappedFile* BackendImpl::File(Addr address) {
836 if (disabled_)
837 return NULL;
838 return block_files_.GetFile(address);
839 }
840
841 bool BackendImpl::CreateExternalFile(Addr* address) {
842 int file_number = data_->header.last_file + 1;
843 Addr file_address(0);
844 bool success = false;
845 for (int i = 0; i < 0x0fffffff; i++, file_number++) {
846 if (!file_address.SetFileNumber(file_number)) {
847 file_number = 1;
848 continue;
849 }
850 FilePath name = GetFileName(file_address);
851 int flags = base::PLATFORM_FILE_READ |
852 base::PLATFORM_FILE_WRITE |
853 base::PLATFORM_FILE_CREATE |
854 base::PLATFORM_FILE_EXCLUSIVE_WRITE;
855 base::PlatformFileError error;
856 scoped_refptr<disk_cache::File> file(new disk_cache::File(
857 base::CreatePlatformFile(name, flags, NULL, &error)));
858 if (!file->IsValid()) {
859 if (error != base::PLATFORM_FILE_ERROR_EXISTS)
860 return false;
861 continue;
862 }
863
864 success = true;
865 break;
866 }
867
868 DCHECK(success);
869 if (!success)
870 return false;
871
872 data_->header.last_file = file_number;
873 address->set_value(file_address.value());
874 return true;
875 }
876
877 bool BackendImpl::CreateBlock(FileType block_type, int block_count,
878 Addr* block_address) {
879 return block_files_.CreateBlock(block_type, block_count, block_address);
880 }
881
882 void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
883 block_files_.DeleteBlock(block_address, deep);
884 }
885
886 LruData* BackendImpl::GetLruData() {
887 return &data_->header.lru;
888 }
889
890 void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
891 if (!read_only_) {
892 eviction_.UpdateRank(entry, modified);
893 }
894 }
895
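// Re-links an entry reachable only through the rankings lists back into the
// index table, if its hash bucket is currently empty (presumably the index was
// left out of sync, e.g. by a crash).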
896 void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
897 Addr address(rankings->Data()->contents);
898 EntryImpl* cache_entry = NULL;
899 if (NewEntry(address, &cache_entry))
900 return;
901
902 uint32 hash = cache_entry->GetHash();
903 cache_entry->Release();
904
905 // Anything on the table means that this entry is there.
906 if (data_->table[hash & mask_])
907 return;
908
909 data_->table[hash & mask_] = address.value();
910 }
911
912 void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
913 uint32 hash = entry->GetHash();
914 std::string key = entry->GetKey();
915 Addr entry_addr = entry->entry()->address();
916 bool error;
917 EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
918 CacheAddr child(entry->GetNextAddress());
919
920 Trace("Doom entry 0x%p", entry);
921
922 if (!entry->doomed()) {
923 // We may have doomed this entry from within MatchEntry.
924 eviction_.OnDoomEntry(entry);
925 entry->InternalDoom();
926 if (!new_eviction_) {
927 DecreaseNumEntries();
928 }
929 stats_.OnEvent(Stats::DOOM_ENTRY);
930 }
931
932 if (parent_entry) {
933 parent_entry->SetNextAddress(Addr(child));
934 parent_entry->Release();
935 } else if (!error) {
936 data_->table[hash & mask_] = child;
937 }
938 }
939
940 // An entry may be linked on the DELETED list for a while after being doomed.
941 // This function is called when we want to remove it.
942 void BackendImpl::RemoveEntry(EntryImpl* entry) {
943 if (!new_eviction_)
944 return;
945
946 DCHECK(ENTRY_NORMAL != entry->entry()->Data()->state);
947
948 Trace("Remove entry 0x%p", entry);
949 eviction_.OnDestroyEntry(entry);
950 DecreaseNumEntries();
951 }
952
953 void BackendImpl::OnEntryDestroyBegin(Addr address) {
954 EntriesMap::iterator it = open_entries_.find(address.value());
955 if (it != open_entries_.end())
956 open_entries_.erase(it);
957 }
958
959 void BackendImpl::OnEntryDestroyEnd() {
960 DecreaseNumRefs();
961 if (data_->header.num_bytes > max_size_ && !read_only_)
962 eviction_.TrimCache(false);
963 }
964
965 EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
966 DCHECK(rankings->HasData());
967 EntriesMap::const_iterator it =
968 open_entries_.find(rankings->Data()->contents);
969 if (it != open_entries_.end()) {
970 // We have this entry in memory.
971 return it->second;
972 }
973
974 return NULL;
975 }
976
977 int32 BackendImpl::GetCurrentEntryId() const {
978 return data_->header.this_id;
979 }
980
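// An individual file is allowed to use at most 1/8 of the total cache size.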
981 int BackendImpl::MaxFileSize() const {
982 return max_size_ / 8;
983 }
984
985 void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
986 if (disabled_ || old_size == new_size)
987 return;
988 if (old_size > new_size)
989 SubstractStorageSize(old_size - new_size);
990 else
991 AddStorageSize(new_size - old_size);
992
993 // Update the usage statistics.
994 stats_.ModifyStorageStats(old_size, new_size);
995 }
996
997 void BackendImpl::TooMuchStorageRequested(int32 size) {
998 stats_.ModifyStorageStats(0, size);
999 }
1000
1001 bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
1002 DCHECK_GT(new_size, current_size);
1003 if (user_flags_ & kNoBuffering)
1004 return false;
1005
1006 int to_add = new_size - current_size;
1007 if (buffer_bytes_ + to_add > MaxBuffersSize())
1008 return false;
1009
1010 buffer_bytes_ += to_add;
1011 CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
1012 return true;
1013 }
1014
1015 void BackendImpl::BufferDeleted(int size) {
1016 buffer_bytes_ -= size;
1017 DCHECK_GE(size, 0);
1018 }
1019
1020 bool BackendImpl::IsLoaded() const {
1021 CACHE_UMA(COUNTS, "PendingIO", GetSizeGroup(), num_pending_io_);
1022 if (user_flags_ & kNoLoadProtection)
1023 return false;
1024
1025 return num_pending_io_ > 5;
1026 }
1027
1028 std::string BackendImpl::HistogramName(const char* name, int experiment) const {
1029 if (!experiment)
1030 return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
1031 return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
1032 name, experiment);
1033 }
1034
1035 base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
1036 return ptr_factory_.GetWeakPtr();
1037 }
1038
1039 int BackendImpl::GetSizeGroup() const {
1040 if (disabled_)
1041 return 0;
1042
1043 // We want to report times grouped by the current cache size (50 MB groups).
1044 int group = data_->header.num_bytes / (50 * 1024 * 1024);
1045 if (group > 6)
1046 group = 6; // Limit the number of groups, just in case.
1047 return group;
1048 }
1049
1050 // We want to remove biases from some histograms so we only send data once per
1051 // week.
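// uma_report_ caches the decision for this session: 0 = not decided yet,
// 1 = decided not to report, 2 = decided to report.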
1052 bool BackendImpl::ShouldReportAgain() {
1053 if (uma_report_)
1054 return uma_report_ == 2;
1055
1056 uma_report_++;
1057 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
1058 Time last_time = Time::FromInternalValue(last_report);
1059 if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
1060 stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
1061 uma_report_++;
1062 return true;
1063 }
1064 return false;
1065 }
1066
1067 void BackendImpl::FirstEviction() {
1068 DCHECK(data_->header.create_time);
1069 if (!GetEntryCount())
1070 return; // This is just for unit tests.
1071
1072 Time create_time = Time::FromInternalValue(data_->header.create_time);
1073 CACHE_UMA(AGE, "FillupAge", 0, create_time);
1074
1075 int64 use_time = stats_.GetCounter(Stats::TIMER);
1076 CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
1077 CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());
1078
1079 if (!use_time)
1080 use_time = 1;
1081 CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
1082 static_cast<int>(data_->header.num_entries / use_time));
1083 CACHE_UMA(COUNTS, "FirstByteIORate", 0,
1084 static_cast<int>((data_->header.num_bytes / 1024) / use_time));
1085
1086 int avg_size = data_->header.num_bytes / GetEntryCount();
1087 CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);
1088
1089 int large_entries_bytes = stats_.GetLargeEntriesSize();
1090 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
1091 CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);
1092
1093 if (new_eviction_) {
1094 CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
1095 CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
1096 data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
1097 CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
1098 data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
1099 CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
1100 data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
1101 }
1102
1103 stats_.ResetRatios();
1104 }
1105
1106 void BackendImpl::CriticalError(int error) {
1107 LOG(ERROR) << "Critical error found " << error;
1108 if (disabled_)
1109 return;
1110
1111 stats_.OnEvent(Stats::FATAL_ERROR);
1112 LogStats();
1113 ReportError(error);
1114
1115 // Setting the index table length to an invalid value will force re-creation
1116 // of the cache files.
1117 data_->header.table_len = 1;
1118 disabled_ = true;
1119
1120 if (!num_refs_)
1121 MessageLoop::current()->PostTask(FROM_HERE,
1122 factory_.NewRunnableMethod(&BackendImpl::RestartCache, true));
1123 }
1124
1125 void BackendImpl::ReportError(int error) {
1126 // We transmit positive numbers, instead of direct error codes.
1127 DCHECK_LE(error, 0);
1128 CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
1129 }
1130
1131 void BackendImpl::OnEvent(Stats::Counters an_event) {
1132 stats_.OnEvent(an_event);
1133 }
1134
1135 void BackendImpl::OnRead(int32 bytes) {
1136 DCHECK_GE(bytes, 0);
1137 byte_count_ += bytes;
1138 if (byte_count_ < 0)
1139 byte_count_ = kint32max;
1140 }
1141
1142 void BackendImpl::OnWrite(int32 bytes) {
1143 // We use the same implementation as OnRead... just log the number of bytes.
1144 OnRead(bytes);
1145 }
1146
1147 void BackendImpl::OnStatsTimer() {
1148 stats_.OnEvent(Stats::TIMER);
1149 int64 time = stats_.GetCounter(Stats::TIMER);
1150 int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
1151
1152 // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
1153 // the bias towards 0.
1154 if (num_refs_ && (current != num_refs_)) {
1155 int64 diff = (num_refs_ - current) / 50;
1156 if (!diff)
1157 diff = num_refs_ > current ? 1 : -1;
1158 current = current + diff;
1159 stats_.SetCounter(Stats::OPEN_ENTRIES, current);
1160 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
1161 }
1162
1163 CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);
1164
1165 CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
1166 CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);
1167 entry_count_ = 0;
1168 byte_count_ = 0;
1169
1170 if (!data_)
1171 first_timer_ = false;
1172 if (first_timer_) {
1173 first_timer_ = false;
1174 if (ShouldReportAgain())
1175 ReportStats();
1176 }
1177
1178 // Save stats to disk at 5 min intervals (the timer fires every 30 seconds).
1179 if (time % 10 == 0)
1180 stats_.Store();
1181 }
1182
1183 void BackendImpl::IncrementIoCount() {
1184 num_pending_io_++;
1185 }
1186
1187 void BackendImpl::DecrementIoCount() {
1188 num_pending_io_--;
1189 }
1190
1191 void BackendImpl::SetUnitTestMode() {
1192 user_flags_ |= kUnitTestMode;
1193 unit_test_ = true;
1194 }
1195
1196 void BackendImpl::SetUpgradeMode() {
1197 user_flags_ |= kUpgradeMode;
1198 read_only_ = true;
1199 }
1200
1201 void BackendImpl::SetNewEviction() {
1202 user_flags_ |= kNewEviction;
1203 new_eviction_ = true;
1204 }
1205
1206 void BackendImpl::SetFlags(uint32 flags) {
1207 user_flags_ |= flags;
1208 }
1209
1210 void BackendImpl::ClearRefCountForTest() {
1211 num_refs_ = 0;
1212 }
1213
1214 int BackendImpl::FlushQueueForTest(CompletionCallback* callback) {
1215 background_queue_.FlushQueue(callback);
1216 return net::ERR_IO_PENDING;
1217 }
1218
1219 int BackendImpl::RunTaskForTest(Task* task, CompletionCallback* callback) {
1220 background_queue_.RunTask(task, callback);
1221 return net::ERR_IO_PENDING;
1222 }
1223
1224 void BackendImpl::TrimForTest(bool empty) {
1225 eviction_.SetTestMode();
1226 eviction_.TrimCache(empty);
1227 }
1228
1229 void BackendImpl::TrimDeletedListForTest(bool empty) {
1230 eviction_.SetTestMode();
1231 eviction_.TrimDeletedList(empty);
1232 }
1233
1234 int BackendImpl::SelfCheck() {
1235 if (!init_) {
1236 LOG(ERROR) << "Init failed";
1237 return ERR_INIT_FAILED;
1238 }
1239
1240 int num_entries = rankings_.SelfCheck();
1241 if (num_entries < 0) {
1242 LOG(ERROR) << "Invalid rankings list, error " << num_entries;
1243 return num_entries;
1244 }
1245
1246 if (num_entries != data_->header.num_entries) {
1247 LOG(ERROR) << "Number of entries mismatch";
1248 return ERR_NUM_ENTRIES_MISMATCH;
1249 }
1250
1251 return CheckAllEntries();
1252 }
1253
1254 // ------------------------------------------------------------------------
1255
1256 int32 BackendImpl::GetEntryCount() const {
1257 if (!index_ || disabled_)
1258 return 0;
1259 // num_entries includes entries already evicted.
1260 int32 not_deleted = data_->header.num_entries -
1261 data_->header.lru.sizes[Rankings::DELETED];
1262
1263 if (not_deleted < 0) {
1264 NOTREACHED();
1265 not_deleted = 0;
1266 }
1267
1268 return not_deleted;
1269 }
1270
1271 int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
1272 CompletionCallback* callback) {
1273 DCHECK(callback);
1274 background_queue_.OpenEntry(key, entry, callback);
1275 return net::ERR_IO_PENDING;
1276 }
1277
1278 int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
1279 CompletionCallback* callback) {
1280 DCHECK(callback);
1281 background_queue_.CreateEntry(key, entry, callback);
1282 return net::ERR_IO_PENDING;
1283 }
1284
1285 int BackendImpl::DoomEntry(const std::string& key,
1286 CompletionCallback* callback) {
1287 DCHECK(callback);
1288 background_queue_.DoomEntry(key, callback);
1289 return net::ERR_IO_PENDING;
1290 }
1291
1292 int BackendImpl::DoomAllEntries(CompletionCallback* callback) {
1293 DCHECK(callback);
1294 background_queue_.DoomAllEntries(callback);
1295 return net::ERR_IO_PENDING;
1296 }
1297
1298 int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
1299 const base::Time end_time,
1300 CompletionCallback* callback) {
1301 DCHECK(callback);
1302 background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
1303 return net::ERR_IO_PENDING;
1304 }
1305
1306 int BackendImpl::DoomEntriesSince(const base::Time initial_time,
1307 CompletionCallback* callback) {
1308 DCHECK(callback);
1309 background_queue_.DoomEntriesSince(initial_time, callback);
1310 return net::ERR_IO_PENDING;
1311 }
1312
1313 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
1314 CompletionCallback* callback) {
1315 DCHECK(callback);
1316 background_queue_.OpenNextEntry(iter, next_entry, callback);
1317 return net::ERR_IO_PENDING;
1318 }
1319
1320 void BackendImpl::EndEnumeration(void** iter) {
1321 background_queue_.EndEnumeration(*iter);
1322 *iter = NULL;
1323 }
1324
1325 void BackendImpl::GetStats(StatsItems* stats) {
1326 if (disabled_)
1327 return;
1328
1329 std::pair<std::string, std::string> item;
1330
1331 item.first = "Entries";
1332 item.second = base::StringPrintf("%d", data_->header.num_entries);
1333 stats->push_back(item);
1334
1335 item.first = "Pending IO";
1336 item.second = base::StringPrintf("%d", num_pending_io_);
1337 stats->push_back(item);
1338
1339 item.first = "Max size";
1340 item.second = base::StringPrintf("%d", max_size_);
1341 stats->push_back(item);
1342
1343 item.first = "Current size";
1344 item.second = base::StringPrintf("%d", data_->header.num_bytes);
1345 stats->push_back(item);
1346
1347 stats_.GetItems(stats);
1348 }
1349
1350 // ------------------------------------------------------------------------
1351
1352 // We just created a new file so we're going to write the header and set the
1353 // file length to include the hash table (zero filled).
1354 bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
1355 AdjustMaxCacheSize(0);
1356
1357 IndexHeader header;
1358 header.table_len = DesiredIndexTableLen(max_size_);
1359
1360 // We need file version 2.1 for the new eviction algorithm.
1361 if (new_eviction_)
1362 header.version = 0x20001;
1363
1364 header.create_time = Time::Now().ToInternalValue();
1365
1366 if (!file->Write(&header, sizeof(header), 0))
1367 return false;
1368
1369 return file->SetLength(GetIndexSize(header.table_len));
1370 }
1371
1372 bool BackendImpl::InitBackingStore(bool* file_created) {
1373 file_util::CreateDirectory(path_);
1374
1375 FilePath index_name = path_.AppendASCII(kIndexName);
1376
1377 int flags = base::PLATFORM_FILE_READ |
1378 base::PLATFORM_FILE_WRITE |
1379 base::PLATFORM_FILE_OPEN_ALWAYS |
1380 base::PLATFORM_FILE_EXCLUSIVE_WRITE;
1381 scoped_refptr<disk_cache::File> file(new disk_cache::File(
1382 base::CreatePlatformFile(index_name, flags, file_created, NULL)));
1383
1384 if (!file->IsValid())
1385 return false;
1386
1387 bool ret = true;
1388 if (*file_created)
1389 ret = CreateBackingStore(file);
1390
1391 file = NULL;
1392 if (!ret)
1393 return false;
1394
1395 index_ = new MappedFile();
1396 data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
1397 if (!data_) {
1398 LOG(ERROR) << "Unable to map Index file";
1399 return false;
1400 }
1401
1402 if (index_->GetLength() < sizeof(Index)) {
1403 // We verify this again on CheckIndex() but it's easier to make sure now
1404 // that the header is there.
1405 LOG(ERROR) << "Corrupt Index file";
1406 return false;
1407 }
1408
1409 return true;
1410 }
1411
1412 // The maximum cache size will be either set explicitly by the caller, or
1413 // calculated by this code.
1414 void BackendImpl::AdjustMaxCacheSize(int table_len) {
1415 if (max_size_)
1416 return;
1417
1418 // If table_len is provided, the index file exists.
1419 DCHECK(!table_len || data_->header.magic);
1420
1421 // The user is not setting the size, let's figure it out.
1422 #ifdef ANDROID
1423 int64 available = 10 * 1024 * 1024; // 10 MB
1424 #else
1425 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
1426 #endif
1427 if (available < 0) {
1428 max_size_ = kDefaultCacheSize;
1429 return;
1430 }
1431
1432 if (table_len)
1433 available += data_->header.num_bytes;
1434
1435 max_size_ = PreferedCacheSize(available);
1436
1437 // Let's not use more than the default size while we tune up the performance
1438 // of bigger caches. TODO(rvargas): remove this limit.
1439 if (max_size_ > kDefaultCacheSize * 4)
1440 max_size_ = kDefaultCacheSize * 4;
1441
1442 if (!table_len)
1443 return;
1444
1445 // If we already have a table, adjust the size to it.
1446 int current_max_size = MaxStorageSizeForTable(table_len);
1447 if (max_size_ > current_max_size)
1448 max_size_ = current_max_size;
1449 }
1450
1451 void BackendImpl::RestartCache(bool failure) {
1452 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
1453 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
1454 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
1455 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
1456
1457 PrepareForRestart();
1458 if (failure) {
1459 DCHECK(!num_refs_);
1460 DCHECK(!open_entries_.size());
1461 DelayedCacheCleanup(path_);
1462 } else {
1463 DeleteCache(path_, false);
1464 }
1465
1466 // Don't call Init() if directed by the unit test: we are simulating a failure
1467 // trying to re-enable the cache.
1468 if (unit_test_)
1469 init_ = true; // Let the destructor do proper cleanup.
1470 else if (SyncInit() == net::OK) {
1471 stats_.SetCounter(Stats::FATAL_ERROR, errors);
1472 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
1473 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
1474 stats_.SetCounter(Stats::LAST_REPORT, last_report);
1475 }
1476 }
1477
1478 void BackendImpl::PrepareForRestart() {
1479 // Reset the mask_ if it was not given by the user.
1480 if (!(user_flags_ & kMask))
1481 mask_ = 0;
1482
1483 if (!(user_flags_ & kNewEviction))
1484 new_eviction_ = false;
1485
1486 disabled_ = true;
1487 #ifdef ANDROID
1488 if (data_) {
1489 #endif
1490 data_->header.crash = 0;
1491 #ifdef ANDROID
1492 }
1493 #endif
1494 index_ = NULL;
1495 data_ = NULL;
1496 block_files_.CloseFiles();
1497 rankings_.Reset();
1498 init_ = false;
1499 restarted_ = true;
1500 }
1501
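// Returns an in-memory EntryImpl for the entry stored at |address|, reusing an
// already open entry when possible; returns 0 on success or an ERR_* code if
// the address or the stored data fails the sanity checks below.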
1502 int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
1503 EntriesMap::iterator it = open_entries_.find(address.value());
1504 if (it != open_entries_.end()) {
1505 // Easy job. This entry is already in memory.
1506 EntryImpl* this_entry = it->second;
1507 this_entry->AddRef();
1508 *entry = this_entry;
1509 return 0;
1510 }
1511
1512 scoped_refptr<EntryImpl> cache_entry(
1513 new EntryImpl(this, address, read_only_));
1514 IncreaseNumRefs();
1515 *entry = NULL;
1516
1517 if (!address.is_initialized() || address.is_separate_file() ||
1518 address.file_type() != BLOCK_256) {
1519 LOG(WARNING) << "Wrong entry address.";
1520 return ERR_INVALID_ADDRESS;
1521 }
1522
1523 TimeTicks start = TimeTicks::Now();
1524 if (!cache_entry->entry()->Load())
1525 return ERR_READ_FAILURE;
1526
1527 if (IsLoaded()) {
1528 CACHE_UMA(AGE_MS, "LoadTime", GetSizeGroup(), start);
1529 }
1530
1531 if (!cache_entry->SanityCheck()) {
1532 LOG(WARNING) << "Messed up entry found.";
1533 return ERR_INVALID_ENTRY;
1534 }
1535
1536 if (!cache_entry->LoadNodeAddress())
1537 return ERR_READ_FAILURE;
1538
1539 // Prevent overwriting the dirty flag on the destructor.
1540 cache_entry->SetDirtyFlag(GetCurrentEntryId());
1541
1542 if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
1543 cache_entry->SetDirtyFlag(0);
1544 // Don't remove this from the list (it is not linked properly). Instead,
1545 // break the link back to the entry because it is going away, and leave the
1546 // rankings node to be deleted if we find it through a list.
1547 rankings_.SetContents(cache_entry->rankings(), 0);
1548 } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
1549 cache_entry->SetDirtyFlag(0);
1550 rankings_.SetContents(cache_entry->rankings(), address.value());
1551 }
1552
1553 if (!cache_entry->DataSanityCheck()) {
1554 LOG(WARNING) << "Messed up entry found.";
1555 cache_entry->SetDirtyFlag(0);
1556 cache_entry->FixForDelete();
1557 }
1558
1559 if (cache_entry->dirty()) {
1560 Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
1561 address.value());
1562 }
1563
1564 open_entries_[address.value()] = cache_entry;
1565
1566 cache_entry->BeginLogging(net_log_, false);
1567 cache_entry.swap(entry);
1568 return 0;
1569 }
1570
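// Walks the chain of entries that share the hash bucket of |key|. Returns the
// entry that matches the key or, if |find_parent| is true, the entry whose
// next pointer should be updated (the matching entry's predecessor, or the
// last entry of the chain when there is no match). Dirty entries found along
// the way are unlinked from the chain and destroyed.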
1571 EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
1572 bool find_parent, Addr entry_addr,
1573 bool* match_error) {
1574 Addr address(data_->table[hash & mask_]);
1575 scoped_refptr<EntryImpl> cache_entry, parent_entry;
1576 EntryImpl* tmp = NULL;
1577 bool found = false;
1578 std::set<CacheAddr> visited;
1579 *match_error = false;
1580
1581 for (;;) {
1582 if (disabled_)
1583 break;
1584
1585 if (visited.find(address.value()) != visited.end()) {
1586 // It's possible for a buggy version of the code to write a loop. Just
1587 // break it.
1588 Trace("Hash collision loop 0x%x", address.value());
1589 address.set_value(0);
1590 parent_entry->SetNextAddress(address);
1591 }
1592 visited.insert(address.value());
1593
1594 if (!address.is_initialized()) {
1595 if (find_parent)
1596 found = true;
1597 break;
1598 }
1599
1600 int error = NewEntry(address, &tmp);
1601 cache_entry.swap(&tmp);
1602
1603 if (error || cache_entry->dirty()) {
1604 // This entry is dirty on disk (it was not properly closed): we cannot
1605 // trust it.
1606 Addr child(0);
1607 if (!error)
1608 child.set_value(cache_entry->GetNextAddress());
1609
1610 if (parent_entry) {
1611 parent_entry->SetNextAddress(child);
1612 parent_entry = NULL;
1613 } else {
1614 data_->table[hash & mask_] = child.value();
1615 }
1616
1617 Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
1618 address.value());
1619
1620 if (!error) {
1621 // It is important to call DestroyInvalidEntry after removing this
1622 // entry from the table.
1623 DestroyInvalidEntry(cache_entry);
1624 cache_entry = NULL;
1625 } else {
1626 Trace("NewEntry failed on MatchEntry 0x%x", address.value());
1627 }
1628
1629 // Restart the search.
1630 address.set_value(data_->table[hash & mask_]);
1631 visited.clear();
1632 continue;
1633 }
1634
1635 DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
1636 if (cache_entry->IsSameEntry(key, hash)) {
1637 if (!cache_entry->Update())
1638 cache_entry = NULL;
1639 found = true;
1640 if (find_parent && entry_addr.value() != address.value()) {
1641 Trace("Entry not on the index 0x%x", address.value());
1642 *match_error = true;
1643 parent_entry = NULL;
1644 }
1645 break;
1646 }
1647 if (!cache_entry->Update())
1648 cache_entry = NULL;
1649 parent_entry = cache_entry;
1650 cache_entry = NULL;
1651 if (!parent_entry)
1652 break;
1653
1654 address.set_value(parent_entry->GetNextAddress());
1655 }
1656
1657 if (parent_entry && (!find_parent || !found))
1658 parent_entry = NULL;
1659
1660 if (find_parent && entry_addr.is_initialized() && !cache_entry) {
1661 *match_error = true;
1662 parent_entry = NULL;
1663 }
1664
1665 if (cache_entry && (find_parent || !found))
1666 cache_entry = NULL;
1667
1668 find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
1669 return tmp;
1670 }
1671
1672 // This is the actual implementation for OpenNextEntry and OpenPrevEntry.
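// It keeps one iterator position per rankings list and, on every call, returns
// the newest (forward) or oldest (backward) entry among the heads of the three
// lists, then advances only the list that entry came from.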
1673 EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
1674 if (disabled_)
1675 return NULL;
1676
1677 DCHECK(iter);
1678
1679 const int kListsToSearch = 3;
1680 scoped_refptr<EntryImpl> entries[kListsToSearch];
1681 scoped_ptr<Rankings::Iterator> iterator(
1682 reinterpret_cast<Rankings::Iterator*>(*iter));
1683 *iter = NULL;
1684
1685 if (!iterator.get()) {
1686 iterator.reset(new Rankings::Iterator(&rankings_));
1687 bool ret = false;
1688
1689 // Get an entry from each list.
1690 for (int i = 0; i < kListsToSearch; i++) {
1691 EntryImpl* temp = NULL;
1692 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
1693 &iterator->nodes[i], &temp);
1694 entries[i].swap(&temp); // The entry was already addref'd.
1695 }
1696 if (!ret)
1697 return NULL;
1698 } else {
1699 // Get the next entry from the last list, and the actual entries for the
1700 // elements on the other lists.
1701 for (int i = 0; i < kListsToSearch; i++) {
1702 EntryImpl* temp = NULL;
1703 if (iterator->list == i) {
1704 OpenFollowingEntryFromList(forward, iterator->list,
1705 &iterator->nodes[i], &temp);
1706 } else {
1707 temp = GetEnumeratedEntry(iterator->nodes[i],
1708 static_cast<Rankings::List>(i));
1709 }
1710
1711 entries[i].swap(&temp); // The entry was already addref'd.
1712 }
1713 }
1714
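  // Pick the entry to return among the candidates from each list: the most
  // recently used one when iterating forward, or the least recently used one
  // when iterating backwards.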
1715 int newest = -1;
1716 int oldest = -1;
1717 Time access_times[kListsToSearch];
1718 for (int i = 0; i < kListsToSearch; i++) {
1719 if (entries[i].get()) {
1720 access_times[i] = entries[i]->GetLastUsed();
1721 if (newest < 0) {
1722 DCHECK_LT(oldest, 0);
1723 newest = oldest = i;
1724 continue;
1725 }
1726 if (access_times[i] > access_times[newest])
1727 newest = i;
1728 if (access_times[i] < access_times[oldest])
1729 oldest = i;
1730 }
1731 }
1732
1733 if (newest < 0 || oldest < 0)
1734 return NULL;
1735
1736 EntryImpl* next_entry;
1737 if (forward) {
1738 next_entry = entries[newest].release();
1739 iterator->list = static_cast<Rankings::List>(newest);
1740 } else {
1741 next_entry = entries[oldest].release();
1742 iterator->list = static_cast<Rankings::List>(oldest);
1743 }
1744
1745 *iter = iterator.release();
1746 return next_entry;
1747 }
1748
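// Gets the next (or previous, if |forward| is false) entry on the given list.
// On success, |from_entry| is updated to the rankings node of the returned
// entry and |next_entry| receives an already addref'd entry.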
1749 bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
1750 CacheRankingsBlock** from_entry,
1751 EntryImpl** next_entry) {
1752 if (disabled_)
1753 return false;
1754
1755 if (!new_eviction_ && Rankings::NO_USE != list)
1756 return false;
1757
1758 Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
1759 CacheRankingsBlock* next_block = forward ?
1760 rankings_.GetNext(rankings.get(), list) :
1761 rankings_.GetPrev(rankings.get(), list);
1762 Rankings::ScopedRankingsBlock next(&rankings_, next_block);
1763 *from_entry = NULL;
1764
1765 *next_entry = GetEnumeratedEntry(next.get(), list);
1766 if (!*next_entry)
1767 return false;
1768
1769 *from_entry = next.release();
1770 return true;
1771 }
1772
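// Returns the entry pointed to by the rankings node |next|, or NULL if the
// entry cannot be created or cannot be trusted; invalid nodes are unlinked
// from the list along the way.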
1773 EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
1774 Rankings::List list) {
1775 if (!next || disabled_)
1776 return NULL;
1777
1778 EntryImpl* entry;
1779 int rv = NewEntry(Addr(next->Data()->contents), &entry);
1780 if (rv) {
1781 rankings_.Remove(next, list, false);
1782 if (rv == ERR_INVALID_ADDRESS) {
1783 // There is nothing linked from the index. Delete the rankings node.
1784 DeleteBlock(next->address(), true);
1785 }
1786 return NULL;
1787 }
1788
1789 if (entry->dirty()) {
1790 // We cannot trust this entry.
1791 InternalDoomEntry(entry);
1792 entry->Release();
1793 return NULL;
1794 }
1795
1796 if (!entry->Update()) {
1797 entry->Release();
1798 return NULL;
1799 }
1800
1801 // Note that it is unfortunate (but possible) for this entry to be clean, but
1802 // not actually the real entry. In other words, we could have lost this entry
1803 // from the index, and it could have been replaced with a newer one. It's not
1804 // worth checking that this entry is "the real one", so we just return it and
1805 // let the enumeration continue; this entry will be evicted at some point, and
1806 // the regular path will work with the real entry. With time, this problem
1807 // will disappear because this scenario is just a bug.
1808
1809 // Make sure that we save the key for later.
1810 entry->GetKey();
1811
1812 return entry;
1813 }
1814
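// Handles an existing entry found while creating a new one for the same key:
// an entry in ENTRY_NORMAL state means the create simply fails, while an
// entry previously marked as deleted is brought back to life.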
1815 EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
1816 if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
1817 deleted_entry->Release();
1818 stats_.OnEvent(Stats::CREATE_MISS);
1819 Trace("create entry miss ");
1820 return NULL;
1821 }
1822
1823 // We are attempting to create an entry and found out that the entry was
1824 // previously deleted.
1825
1826 eviction_.OnCreateEntry(deleted_entry);
1827 entry_count_++;
1828
1829 stats_.OnEvent(Stats::RESURRECT_HIT);
1830 Trace("Resurrect entry hit ");
1831 return deleted_entry;
1832 }
1833
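// Dooms an entry that cannot be trusted (typically one found dirty on disk
// after being unlinked from the index) and updates the relevant counters and
// eviction state.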
1834 void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
1835 LOG(WARNING) << "Destroying invalid entry.";
1836 Trace("Destroying invalid entry 0x%p", entry);
1837
1838 entry->SetPointerForInvalidEntry(GetCurrentEntryId());
1839
1840 eviction_.OnDoomEntry(entry);
1841 entry->InternalDoom();
1842
1843 if (!new_eviction_)
1844 DecreaseNumEntries();
1845 stats_.OnEvent(Stats::INVALID_ENTRY);
1846 }
1847
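// The next few methods maintain the header counters: total bytes stored,
// number of open references and number of entries.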
1848 void BackendImpl::AddStorageSize(int32 bytes) {
1849 data_->header.num_bytes += bytes;
1850 DCHECK_GE(data_->header.num_bytes, 0);
1851 }
1852
1853 void BackendImpl::SubstractStorageSize(int32 bytes) {
1854 data_->header.num_bytes -= bytes;
1855 DCHECK_GE(data_->header.num_bytes, 0);
1856 }
1857
1858 void BackendImpl::IncreaseNumRefs() {
1859 num_refs_++;
1860 if (max_refs_ < num_refs_)
1861 max_refs_ = num_refs_;
1862 }
1863
1864 void BackendImpl::DecreaseNumRefs() {
1865 DCHECK(num_refs_);
1866 num_refs_--;
1867
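  // If the backend was disabled due to an error, the actual restart is
  // deferred until the last external reference goes away.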
1868 if (!num_refs_ && disabled_)
1869 MessageLoop::current()->PostTask(FROM_HERE,
1870 factory_.NewRunnableMethod(&BackendImpl::RestartCache, true));
1871 }
1872
1873 void BackendImpl::IncreaseNumEntries() {
1874 data_->header.num_entries++;
1875 DCHECK_GT(data_->header.num_entries, 0);
1876 }
1877
1878 void BackendImpl::DecreaseNumEntries() {
1879 data_->header.num_entries--;
1880 if (data_->header.num_entries < 0) {
1881 NOTREACHED();
1882 data_->header.num_entries = 0;
1883 }
1884 }
1885
1886 void BackendImpl::LogStats() {
1887 StatsItems stats;
1888 GetStats(&stats);
1889
1890 for (size_t index = 0; index < stats.size(); index++)
1891 VLOG(1) << stats[index].first << ": " << stats[index].second;
1892 }
1893
1894 void BackendImpl::ReportStats() {
1895 CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);
1896
1897 int current_size = data_->header.num_bytes / (1024 * 1024);
1898 int max_size = max_size_ / (1024 * 1024);
1899 CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
1900 CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
1901 if (!max_size)
1902 max_size++;
1903 CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);
1904
1905 CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
1906 static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
1907 CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
1908 static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
1909 stats_.SetCounter(Stats::MAX_ENTRIES, 0);
1910
1911 CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
1912 static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
1913 CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
1914 static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
1915 CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
1916 static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
1917 stats_.SetCounter(Stats::FATAL_ERROR, 0);
1918 stats_.SetCounter(Stats::DOOM_CACHE, 0);
1919 stats_.SetCounter(Stats::DOOM_RECENT, 0);
1920
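  // Stats::TIMER is incremented by a periodic timer (presumably every 30
  // seconds), so dividing by 120 converts the count to hours.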
1921 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
1922 if (!data_->header.create_time || !data_->header.lru.filled) {
1923 int cause = data_->header.create_time ? 0 : 1;
1924 if (!data_->header.lru.filled)
1925 cause |= 2;
1926 CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
1927 CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
1928 return;
1929 }
1930
1931 // This is an up-to-date client that will report FirstEviction() data. After
1932 // that event, start reporting the following:
1933
1934 CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
1935
1936 int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
1937 stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
1938
1939 // We may see users with no use_hours at this point if this is the first time
1940 // we are running this code.
1941 if (use_hours)
1942 use_hours = total_hours - use_hours;
1943
1944 if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
1945 return;
1946
1947 CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
1948 CACHE_UMA(PERCENTAGE, "HitRatio", data_->header.experiment,
1949 stats_.GetHitRatio());
1950
1951 int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
1952 CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
1953
1954 int avg_size = data_->header.num_bytes / GetEntryCount();
1955 CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
1956 CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);
1957
1958 CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
1959 data_->header.num_entries * 100 / (mask_ + 1));
1960
1961 int large_entries_bytes = stats_.GetLargeEntriesSize();
1962 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
1963 CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
1964
1965 if (new_eviction_) {
1966 CACHE_UMA(PERCENTAGE, "ResurrectRatio", data_->header.experiment,
1967 stats_.GetResurrectRatio());
1968 CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
1969 data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
1970 CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
1971 data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
1972 CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
1973 data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
1974 CACHE_UMA(PERCENTAGE, "DeletedRatio", data_->header.experiment,
1975 data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
1976 }
1977
1978 stats_.ResetRatios();
1979 stats_.SetCounter(Stats::TRIM_ENTRY, 0);
1980
1981 if (cache_type_ == net::DISK_CACHE)
1982 block_files_.ReportStats();
1983 }
1984
1985 void BackendImpl::UpgradeTo2_1() {
1986 // 2.1 is basically the same as 2.0, except that new fields are actually
1987 // updated by the new eviction algorithm.
1988 DCHECK(0x20000 == data_->header.version);
1989 data_->header.version = 0x20001;
1990 data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
1991 }
1992
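// Performs basic sanity checks on the index file header (magic, version,
// table size and global counters) before the cache starts using it.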
1993 bool BackendImpl::CheckIndex() {
1994 DCHECK(data_);
1995
1996 size_t current_size = index_->GetLength();
1997 if (current_size < sizeof(Index)) {
1998 LOG(ERROR) << "Corrupt Index file";
1999 return false;
2000 }
2001
2002 if (new_eviction_) {
2003 // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
2004 if (kIndexMagic != data_->header.magic ||
2005 kCurrentVersion >> 16 != data_->header.version >> 16) {
2006 LOG(ERROR) << "Invalid file version or magic";
2007 return false;
2008 }
2009 if (kCurrentVersion == data_->header.version) {
2010 // We need file version 2.1 for the new eviction algorithm.
2011 UpgradeTo2_1();
2012 }
2013 } else {
2014 if (kIndexMagic != data_->header.magic ||
2015 kCurrentVersion != data_->header.version) {
2016 LOG(ERROR) << "Invalid file version or magic";
2017 return false;
2018 }
2019 }
2020
2021 if (!data_->header.table_len) {
2022 LOG(ERROR) << "Invalid table size";
2023 return false;
2024 }
2025
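  // The file must be large enough to hold the whole table, and the table
  // length must be a multiple of kBaseTableLen.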
2026 if (current_size < GetIndexSize(data_->header.table_len) ||
2027 data_->header.table_len & (kBaseTableLen - 1)) {
2028 LOG(ERROR) << "Corrupt Index file";
2029 return false;
2030 }
2031
2032 AdjustMaxCacheSize(data_->header.table_len);
2033
2034 if (data_->header.num_bytes < 0 ||
2035 (max_size_ < kint32max - kDefaultCacheSize &&
2036 data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
2037 LOG(ERROR) << "Invalid cache (current) size";
2038 return false;
2039 }
2040
2041 if (data_->header.num_entries < 0) {
2042 LOG(ERROR) << "Invalid number of entries";
2043 return false;
2044 }
2045
2046 if (!mask_)
2047 mask_ = data_->header.table_len - 1;
2048
2049 // Load the table into memory with a single read.
2050 scoped_array<char> buf(new char[current_size]);
2051 return index_->Read(buf.get(), current_size, 0);
2052 }
2053
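// Walks every bucket of the hash table and follows each collision chain,
// counting clean and dirty entries. Returns the number of dirty entries, or
// an error code if something looks wrong.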
2054 int BackendImpl::CheckAllEntries() {
2055 int num_dirty = 0;
2056 int num_entries = 0;
2057 DCHECK(mask_ < kuint32max);
2058 for (int i = 0; i <= static_cast<int>(mask_); i++) {
2059 Addr address(data_->table[i]);
2060 if (!address.is_initialized())
2061 continue;
2062 for (;;) {
2063 EntryImpl* tmp;
2064 int ret = NewEntry(address, &tmp);
2065 if (ret)
2066 return ret;
2067 scoped_refptr<EntryImpl> cache_entry;
2068 cache_entry.swap(&tmp);
2069
2070 if (cache_entry->dirty())
2071 num_dirty++;
2072 else if (CheckEntry(cache_entry.get()))
2073 num_entries++;
2074 else
2075 return ERR_INVALID_ENTRY;
2076
2077 address.set_value(cache_entry->GetNextAddress());
2078 if (!address.is_initialized())
2079 break;
2080 }
2081 }
2082
2083 Trace("CheckAllEntries End");
2084 if (num_entries + num_dirty != data_->header.num_entries) {
2085 LOG(ERROR) << "Number of entries mismatch";
2086 return ERR_NUM_ENTRIES_MISMATCH;
2087 }
2088
2089 return num_dirty;
2090 }
2091
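// Verifies that all the addresses stored by this entry (the entry block, its
// rankings node and any data blocks kept in block files) are valid.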
2092 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
2093 bool ok = block_files_.IsValid(cache_entry->entry()->address());
2094 ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
2095 EntryStore* data = cache_entry->entry()->Data();
2096 for (size_t i = 0; i < arraysize(data->data_addr); i++) {
2097 if (data->data_addr[i]) {
2098 Addr address(data->data_addr[i]);
2099 if (address.is_block_file())
2100 ok = ok && block_files_.IsValid(address);
2101 }
2102 }
2103
2104 RankingsNode* rankings = cache_entry->rankings()->Data();
2105 return ok && !rankings->dummy;
2106 }
2107
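// Maximum total memory to use for internal buffering: 2% of physical memory,
// capped at 30 MB.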
2108 int BackendImpl::MaxBuffersSize() {
2109 static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
2110 static bool done = false;
2111
2112 if (!done) {
2113 const int kMaxBuffersSize = 30 * 1024 * 1024;
2114
2115 // We want to use up to 2% of the computer's memory.
2116 total_memory = total_memory * 2 / 100;
2117 if (total_memory > kMaxBuffersSize || total_memory <= 0)
2118 total_memory = kMaxBuffersSize;
2119
2120 done = true;
2121 }
2122
2123 return static_cast<int>(total_memory);
2124 }
2125
2126 } // namespace disk_cache
2127