// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/backend_impl.h"

#include "base/file_path.h"
#include "base/file_util.h"
#include "base/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "base/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/worker_pool.h"
#include "base/time.h"
#include "base/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/file.h"
#include "net/disk_cache/hash.h"
#include "net/disk_cache/mem_backend_impl.h"

// This has to be defined before including histogram_macros.h from this file.
#define NET_DISK_CACHE_BACKEND_IMPL_CC_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";
const int kMaxOldFolders = 100;

// Seems like ~240 MB corresponds to less than 50k entries for 99% of the
// people. Note that the actual target is to keep the index table load factor
// under 55% for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}
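
// A worked example of the sizing math above (illustrative only): a 500 MB
// cache falls in the (480 MB, 960 MB] bucket, so DesiredIndexTableLen()
// returns 64K * 4 = 256K buckets. With 4-byte cache addresses that table takes
// 1 MB on disk (plus the IndexHeader); the "4 MB table" mentioned above is the
// 1M-bucket case, 64K * 16 buckets * 4 bytes.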

// ------------------------------------------------------------------------

// Returns a fully qualified name from path and name, using a given name prefix
// and index number. For instance, if the arguments are "/foo", "bar" and 5, it
// will return "/foo/old_bar_005".
FilePath GetPrefixedName(const FilePath& path, const std::string& name,
                         int index) {
  std::string tmp = base::StringPrintf("%s%s_%03d", "old_",
                                       name.c_str(), index);
  return path.AppendASCII(tmp);
}

// This is a simple Task to clean up old caches.
class CleanupTask : public Task {
 public:
  CleanupTask(const FilePath& path, const std::string& name)
      : path_(path), name_(name) {}

  virtual void Run();

 private:
  FilePath path_;
  std::string name_;
  DISALLOW_COPY_AND_ASSIGN(CleanupTask);
};

void CleanupTask::Run() {
  for (int i = 0; i < kMaxOldFolders; i++) {
    FilePath to_delete = GetPrefixedName(path_, name_, i);
    disk_cache::DeleteCache(to_delete, true);
  }
}

// Returns a full path to rename the current cache, in order to delete it. path
// is the current folder location, and name is the current folder name.
FilePath GetTempCacheName(const FilePath& path, const std::string& name) {
  // We'll attempt to have up to kMaxOldFolders folders for deletion.
  for (int i = 0; i < kMaxOldFolders; i++) {
    FilePath to_delete = GetPrefixedName(path, name, i);
    if (!file_util::PathExists(to_delete))
      return to_delete;
  }
  return FilePath();
}

// Moves the cache files to a new folder and creates a task to delete them.
bool DelayedCacheCleanup(const FilePath& full_path) {
  // GetTempCacheName() and MoveCache() use synchronous file operations.
  base::ThreadRestrictions::ScopedAllowIO allow_io;

  FilePath current_path = full_path.StripTrailingSeparators();

  FilePath path = current_path.DirName();
  FilePath name = current_path.BaseName();
#if defined(OS_POSIX)
  std::string name_str = name.value();
#elif defined(OS_WIN)
  // We created this file so it should only contain ASCII.
  std::string name_str = WideToASCII(name.value());
#endif

  FilePath to_delete = GetTempCacheName(path, name_str);
  if (to_delete.empty()) {
    LOG(ERROR) << "Unable to get another cache folder";
    return false;
  }

  if (!disk_cache::MoveCache(full_path, to_delete)) {
    LOG(ERROR) << "Unable to move cache folder";
    return false;
  }

  base::WorkerPool::PostTask(FROM_HERE, new CleanupTask(path, name_str), true);
  return true;
}

// Sets the group for the current experiment. Returns false if the files should
// be discarded.
bool InitExperiment(disk_cache::IndexHeader* header) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  // See if we already defined the group for this profile.
  if (header->experiment >= disk_cache::EXPERIMENT_DELETED_LIST_OUT)
    return true;

  // The experiment is closed.
  header->experiment = disk_cache::EXPERIMENT_DELETED_LIST_OUT;
  return true;
}

// Initializes the field trial structures to allow performance measurements
// for the current cache configuration.
void SetFieldTrialInfo(int size_group) {
  static bool first = true;
  if (!first)
    return;

  // Field trials involve static objects so we have to do this only once.
  first = false;
  std::string group1 = base::StringPrintf("CacheSizeGroup_%d", size_group);
  int totalProbability = 10;
  scoped_refptr<base::FieldTrial> trial1(
      new base::FieldTrial("CacheSize", totalProbability, group1, 2011, 6, 30));
  trial1->AppendGroup(group1, totalProbability);
}

// ------------------------------------------------------------------------

// This class takes care of building an instance of the backend.
class CacheCreator {
 public:
  CacheCreator(const FilePath& path, bool force, int max_bytes,
               net::CacheType type, uint32 flags,
               base::MessageLoopProxy* thread, net::NetLog* net_log,
               disk_cache::Backend** backend,
               net::CompletionCallback* callback)
      : path_(path), force_(force), retry_(false), max_bytes_(max_bytes),
        type_(type), flags_(flags), thread_(thread), backend_(backend),
        callback_(callback), cache_(NULL), net_log_(net_log),
        ALLOW_THIS_IN_INITIALIZER_LIST(
            my_callback_(this, &CacheCreator::OnIOComplete)) {
  }
  ~CacheCreator() {}

  // Creates the backend.
  int Run();

  // Callback implementation.
  void OnIOComplete(int result);

 private:
  void DoCallback(int result);

  const FilePath& path_;
  bool force_;
  bool retry_;
  int max_bytes_;
  net::CacheType type_;
  uint32 flags_;
  scoped_refptr<base::MessageLoopProxy> thread_;
  disk_cache::Backend** backend_;
  net::CompletionCallback* callback_;
  disk_cache::BackendImpl* cache_;
  net::NetLog* net_log_;
  net::CompletionCallbackImpl<CacheCreator> my_callback_;

  DISALLOW_COPY_AND_ASSIGN(CacheCreator);
};

int CacheCreator::Run() {
  cache_ = new disk_cache::BackendImpl(path_, thread_, net_log_);
  cache_->SetMaxSize(max_bytes_);
  cache_->SetType(type_);
  cache_->SetFlags(flags_);
  int rv = cache_->Init(&my_callback_);
  DCHECK_EQ(net::ERR_IO_PENDING, rv);
  return rv;
}

void CacheCreator::OnIOComplete(int result) {
  if (result == net::OK || !force_ || retry_)
    return DoCallback(result);

  // This is a failure and we are supposed to try again, so delete the object,
  // delete all the files, and try again.
  retry_ = true;
  delete cache_;
  cache_ = NULL;
  if (!DelayedCacheCleanup(path_))
    return DoCallback(result);

  // The worker thread will start deleting files soon, but the original folder
  // is not there anymore... let's create a new set of files.
  int rv = Run();
  DCHECK_EQ(net::ERR_IO_PENDING, rv);
}

void CacheCreator::DoCallback(int result) {
  DCHECK_NE(net::ERR_IO_PENDING, result);
  if (result == net::OK) {
    *backend_ = cache_;
  } else {
    LOG(ERROR) << "Unable to create cache";
    *backend_ = NULL;
    delete cache_;
  }
  callback_->Run(result);
  delete this;
}

// ------------------------------------------------------------------------

// A task to perform final cleanup on the background thread.
class FinalCleanup : public Task {
 public:
  explicit FinalCleanup(disk_cache::BackendImpl* backend) : backend_(backend) {}
  ~FinalCleanup() {}

  virtual void Run();

 private:
  disk_cache::BackendImpl* backend_;
  DISALLOW_EVIL_CONSTRUCTORS(FinalCleanup);
};

void FinalCleanup::Run() {
  backend_->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

int CreateCacheBackend(net::CacheType type, const FilePath& path, int max_bytes,
                       bool force, base::MessageLoopProxy* thread,
                       net::NetLog* net_log, Backend** backend,
                       CompletionCallback* callback) {
  DCHECK(callback);
  if (type == net::MEMORY_CACHE) {
    *backend = MemBackendImpl::CreateBackend(max_bytes, net_log);
    return *backend ? net::OK : net::ERR_FAILED;
  }
  DCHECK(thread);

  return BackendImpl::CreateBackend(path, force, max_bytes, type, kNone, thread,
                                    net_log, backend, callback);
}

// Returns the preferred maximum number of bytes for the cache given the
// number of available bytes.
int PreferedCacheSize(int64 available) {
  // Return 80% of the available space if there is not enough space to use
  // kDefaultCacheSize.
  if (available < kDefaultCacheSize * 10 / 8)
    return static_cast<int32>(available * 8 / 10);

  // Return kDefaultCacheSize if it uses 80% to 10% of the available space.
  if (available < kDefaultCacheSize * 10)
    return kDefaultCacheSize;

  // Return 10% of the available space if the target size
  // (2.5 * kDefaultCacheSize) is more than 10%.
  if (available < static_cast<int64>(kDefaultCacheSize) * 25)
    return static_cast<int32>(available / 10);

  // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
  // of the available space.
  if (available < static_cast<int64>(kDefaultCacheSize) * 250)
    return kDefaultCacheSize * 5 / 2;

  // Return 1% of the available space if it does not exceed kint32max.
  if (available < static_cast<int64>(kint32max) * 100)
    return static_cast<int32>(available / 100);

  return kint32max;
}
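
// Illustrative tiers (kDefaultCacheSize == 80 MB): with 50 MB free the cache
// gets 40 MB (80%); with 200 MB free it gets the 80 MB default; with 1 GB free
// it gets ~100 MB (10%); with 10 GB free it gets the 200 MB target; and with
// 100 GB free it gets 1 GB (1%).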

// ------------------------------------------------------------------------

BackendImpl::BackendImpl(const FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : ALLOW_THIS_IN_INITIALIZER_LIST(background_queue_(this, cache_thread)),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      io_delay_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      net_log_(net_log),
      done_(true, false),
      ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)),
      ALLOW_THIS_IN_INITIALIZER_LIST(ptr_factory_(this)) {
}

BackendImpl::BackendImpl(const FilePath& path,
                         uint32 mask,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : ALLOW_THIS_IN_INITIALIZER_LIST(background_queue_(this, cache_thread)),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      io_delay_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      net_log_(net_log),
      done_(true, false),
      ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)),
      ALLOW_THIS_IN_INITIALIZER_LIST(ptr_factory_(this)) {
}

BackendImpl::~BackendImpl() {
  background_queue_.WaitForPendingIO();

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(FROM_HERE,
                                                    new FinalCleanup(this));
    done_.Wait();
  }
}

// If the initialization of the cache fails, and force is true, we will discard
// the whole cache and create a new one. In order to process a potentially large
// number of files, we'll rename the cache folder to old_ + original_name +
// number, (located on the same parent folder), and spawn a worker thread to
// delete all the files on all the stale cache folders. The whole process can
// still fail if we are not able to rename the cache folder (for instance due to
// a sharing violation), and in that case a cache for this profile (on the
// desired path) cannot be created.
//
// Static.
int BackendImpl::CreateBackend(const FilePath& full_path, bool force,
                               int max_bytes, net::CacheType type,
                               uint32 flags, base::MessageLoopProxy* thread,
                               net::NetLog* net_log, Backend** backend,
                               CompletionCallback* callback) {
  DCHECK(callback);
  CacheCreator* creator = new CacheCreator(full_path, force, max_bytes, type,
                                           flags, thread, net_log, backend,
                                           callback);
  // This object will self-destroy when finished.
  return creator->Run();
}

int BackendImpl::Init(CompletionCallback* callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.Start(TimeDelta::FromMilliseconds(timer_delay), this,
                 &BackendImpl::OnStatsTimer);
  }

  init_ = true;

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & disk_cache::kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & disk_cache::kNoRandom) &&
      cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header))
    return net::ERR_FAILED;

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  if (data_->header.crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else {
    ReportError(0);
    data_->header.crash = 1;
  }

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to the cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  }

  // Set up load-time data only for the main cache.
  if (cache_type() == net::DISK_CACHE)
    SetFieldTrialInfo(GetSizeGroup());

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!stats_.Init(this, &data_->header.stats))
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.Stop();

  if (init_) {
    stats_.Store();
    if (data_)
      data_->header.crash = 0;

    File::WaitForPendingIO(&num_pending_io_);
    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      DCHECK(!num_refs_);
    }
  }
  block_files_.CloseFiles();
  factory_.RevokeAll();
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               CompletionCallback* callback) {
  DCHECK(callback);
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iter);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
  *prev_entry = OpenPrevEntryImpl(iter);
  return (*prev_entry) ? net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(void* iter) {
  // Taking ownership here deletes the iterator when the scoped_ptr goes out
  // of scope.
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(iter));
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (!cache_entry) {
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  if (ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start);
  stats_.OnEvent(Stats::OPEN_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or just
    // a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }
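
  // Illustrative numbers for the block math below: the inline key space is
  // sizeof(EntryStore) - offsetof(EntryStore, key), roughly 160 bytes for a
  // 256-byte EntryStore. A short key fits in one 256-byte block; a 300-byte
  // key needs (300 - 160) / 256 + 2 == 2 blocks; and keys longer than
  // kMaxInternalKeyLength are stored externally, using a single block here.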
  int num_blocks;
  size_t key1_len = sizeof(EntryStore) - offsetof(EntryStore, key);
  if (key.size() < key1_len ||
      key.size() > static_cast<size_t>(kMaxInternalKeyLength))
    num_blocks = 1;
  else
    num_blocks = static_cast<int>((key.size() - key1_len) / 256 + 2);

  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry;

  if (parent.get())
    parent->SetNextAddress(entry_address);

  block_files_.GetFile(entry_address)->Store(cache_entry->entry());
  block_files_.GetFile(node_address)->Store(cache_entry->rankings());

  IncreaseNumEntries();
  eviction_.OnCreateEntry(cache_entry);
  entry_count_++;
  if (!parent.get())
    data_->table[hash & mask_] = entry_address.value();

  CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  return cache_entry.release();
}

EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
  return OpenFollowingEntry(true, iter);
}

EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
  return OpenFollowingEntry(false, iter);
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK(type != net::MEMORY_CACHE);
  cache_type_ = type;
}

FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    FilePath name = GetFileName(file_address);
    int flags = base::PLATFORM_FILE_READ |
                base::PLATFORM_FILE_WRITE |
                base::PLATFORM_FILE_CREATE |
                base::PLATFORM_FILE_EXCLUSIVE_WRITE;
    base::PlatformFileError error;
    scoped_refptr<disk_cache::File> file(new disk_cache::File(
        base::CreatePlatformFile(name, flags, NULL, &error)));
    if (!file->IsValid()) {
      if (error != base::PLATFORM_FILE_ERROR_EXISTS)
        return false;
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (!read_only_) {
    eviction_.UpdateRank(entry, modified);
  }
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  if (NewEntry(address, &cache_entry))
    return;

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }
}

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
  if (!new_eviction_)
    return;

  DCHECK(ENTRY_NORMAL != entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_)
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}
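
// For example (illustrative): with the default 80 MB cache, MaxFileSize()
// below caps a single entry's data at 80 MB / 8 = 10 MB, so one entry cannot
// monopolize the cache.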
int BackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", GetSizeGroup(), num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return num_pending_io_ > 5;
}
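
// For example, HistogramName("OpenTime", 0) for the main disk cache
// (net::DISK_CACHE, which prints as 0) yields "DiskCache.0.OpenTime"; a
// nonzero experiment id is appended as a suffix, e.g. "_5".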
std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

int BackendImpl::GetSizeGroup() const {
  if (disabled_)
    return 0;

  // We want to report times grouped by the current cache size (50 MB groups).
  int group = data_->header.num_bytes / (50 * 1024 * 1024);
  if (group > 6)
    group = 6;  // Limit the number of groups, just in case.
  return group;
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    MessageLoop::current()->PostTask(FROM_HERE,
        factory_.NewRunnableMethod(&BackendImpl::RestartCache, true));
}

void BackendImpl::ReportError(int error) {
  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
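  // For example, with current == 0 and num_refs_ == 100, the first tick below
  // adjusts the sample by (100 - 0) / 50 == 2; each 30-second tick closes one
  // fiftieth of the gap between the sampled and the live value.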
1144 if (num_refs_ && (current != num_refs_)) {
1145 int64 diff = (num_refs_ - current) / 50;
1146 if (!diff)
1147 diff = num_refs_ > current ? 1 : -1;
1148 current = current + diff;
1149 stats_.SetCounter(Stats::OPEN_ENTRIES, current);
1150 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
1151 }
1152
1153 CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);
1154
1155 CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
1156 CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);
1157 entry_count_ = 0;
1158 byte_count_ = 0;
1159
1160 if (!data_)
1161 first_timer_ = false;
1162 if (first_timer_) {
1163 first_timer_ = false;
1164 if (ShouldReportAgain())
1165 ReportStats();
1166 }
1167
1168 // Save stats to disk at 5 min intervals.
1169 if (time % 10 == 0)
1170 stats_.Store();
1171 }
1172
IncrementIoCount()1173 void BackendImpl::IncrementIoCount() {
1174 num_pending_io_++;
1175 }
1176
DecrementIoCount()1177 void BackendImpl::DecrementIoCount() {
1178 num_pending_io_--;
1179 }
1180
SetUnitTestMode()1181 void BackendImpl::SetUnitTestMode() {
1182 user_flags_ |= kUnitTestMode;
1183 unit_test_ = true;
1184 }
1185
SetUpgradeMode()1186 void BackendImpl::SetUpgradeMode() {
1187 user_flags_ |= kUpgradeMode;
1188 read_only_ = true;
1189 }
1190
SetNewEviction()1191 void BackendImpl::SetNewEviction() {
1192 user_flags_ |= kNewEviction;
1193 new_eviction_ = true;
1194 }
1195
SetFlags(uint32 flags)1196 void BackendImpl::SetFlags(uint32 flags) {
1197 user_flags_ |= flags;
1198 }
1199
ClearRefCountForTest()1200 void BackendImpl::ClearRefCountForTest() {
1201 num_refs_ = 0;
1202 }
1203
FlushQueueForTest(CompletionCallback * callback)1204 int BackendImpl::FlushQueueForTest(CompletionCallback* callback) {
1205 background_queue_.FlushQueue(callback);
1206 return net::ERR_IO_PENDING;
1207 }
1208
RunTaskForTest(Task * task,CompletionCallback * callback)1209 int BackendImpl::RunTaskForTest(Task* task, CompletionCallback* callback) {
1210 background_queue_.RunTask(task, callback);
1211 return net::ERR_IO_PENDING;
1212 }
1213
TrimForTest(bool empty)1214 void BackendImpl::TrimForTest(bool empty) {
1215 eviction_.SetTestMode();
1216 eviction_.TrimCache(empty);
1217 }
1218
TrimDeletedListForTest(bool empty)1219 void BackendImpl::TrimDeletedListForTest(bool empty) {
1220 eviction_.SetTestMode();
1221 eviction_.TrimDeletedList(empty);
1222 }
1223
SelfCheck()1224 int BackendImpl::SelfCheck() {
1225 if (!init_) {
1226 LOG(ERROR) << "Init failed";
1227 return ERR_INIT_FAILED;
1228 }
1229
1230 int num_entries = rankings_.SelfCheck();
1231 if (num_entries < 0) {
1232 LOG(ERROR) << "Invalid rankings list, error " << num_entries;
1233 return num_entries;
1234 }
1235
1236 if (num_entries != data_->header.num_entries) {
1237 LOG(ERROR) << "Number of entries mismatch";
1238 return ERR_NUM_ENTRIES_MISMATCH;
1239 }
1240
1241 return CheckAllEntries();
1242 }
1243
1244 // ------------------------------------------------------------------------
1245
GetEntryCount() const1246 int32 BackendImpl::GetEntryCount() const {
1247 if (!index_ || disabled_)
1248 return 0;
1249 // num_entries includes entries already evicted.
1250 int32 not_deleted = data_->header.num_entries -
1251 data_->header.lru.sizes[Rankings::DELETED];
1252
1253 if (not_deleted < 0) {
1254 NOTREACHED();
1255 not_deleted = 0;
1256 }
1257
1258 return not_deleted;
1259 }
1260
OpenEntry(const std::string & key,Entry ** entry,CompletionCallback * callback)1261 int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
1262 CompletionCallback* callback) {
1263 DCHECK(callback);
1264 background_queue_.OpenEntry(key, entry, callback);
1265 return net::ERR_IO_PENDING;
1266 }
1267
CreateEntry(const std::string & key,Entry ** entry,CompletionCallback * callback)1268 int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
1269 CompletionCallback* callback) {
1270 DCHECK(callback);
1271 background_queue_.CreateEntry(key, entry, callback);
1272 return net::ERR_IO_PENDING;
1273 }
1274
DoomEntry(const std::string & key,CompletionCallback * callback)1275 int BackendImpl::DoomEntry(const std::string& key,
1276 CompletionCallback* callback) {
1277 DCHECK(callback);
1278 background_queue_.DoomEntry(key, callback);
1279 return net::ERR_IO_PENDING;
1280 }
1281
DoomAllEntries(CompletionCallback * callback)1282 int BackendImpl::DoomAllEntries(CompletionCallback* callback) {
1283 DCHECK(callback);
1284 background_queue_.DoomAllEntries(callback);
1285 return net::ERR_IO_PENDING;
1286 }
1287
DoomEntriesBetween(const base::Time initial_time,const base::Time end_time,CompletionCallback * callback)1288 int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
1289 const base::Time end_time,
1290 CompletionCallback* callback) {
1291 DCHECK(callback);
1292 background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
1293 return net::ERR_IO_PENDING;
1294 }
1295
DoomEntriesSince(const base::Time initial_time,CompletionCallback * callback)1296 int BackendImpl::DoomEntriesSince(const base::Time initial_time,
1297 CompletionCallback* callback) {
1298 DCHECK(callback);
1299 background_queue_.DoomEntriesSince(initial_time, callback);
1300 return net::ERR_IO_PENDING;
1301 }
1302
OpenNextEntry(void ** iter,Entry ** next_entry,CompletionCallback * callback)1303 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
1304 CompletionCallback* callback) {
1305 DCHECK(callback);
1306 background_queue_.OpenNextEntry(iter, next_entry, callback);
1307 return net::ERR_IO_PENDING;
1308 }
1309
EndEnumeration(void ** iter)1310 void BackendImpl::EndEnumeration(void** iter) {
1311 background_queue_.EndEnumeration(*iter);
1312 *iter = NULL;
1313 }
1314
GetStats(StatsItems * stats)1315 void BackendImpl::GetStats(StatsItems* stats) {
1316 if (disabled_)
1317 return;
1318
1319 std::pair<std::string, std::string> item;
1320
1321 item.first = "Entries";
1322 item.second = base::StringPrintf("%d", data_->header.num_entries);
1323 stats->push_back(item);
1324
1325 item.first = "Pending IO";
1326 item.second = base::StringPrintf("%d", num_pending_io_);
1327 stats->push_back(item);
1328
1329 item.first = "Max size";
1330 item.second = base::StringPrintf("%d", max_size_);
1331 stats->push_back(item);
1332
1333 item.first = "Current size";
1334 item.second = base::StringPrintf("%d", data_->header.num_bytes);
1335 stats->push_back(item);
1336
1337 stats_.GetItems(stats);
1338 }
1339
1340 // ------------------------------------------------------------------------
1341
1342 // We just created a new file so we're going to write the header and set the
1343 // file length to include the hash table (zero filled).
CreateBackingStore(disk_cache::File * file)1344 bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
1345 AdjustMaxCacheSize(0);
1346
1347 IndexHeader header;
1348 header.table_len = DesiredIndexTableLen(max_size_);
1349
1350 // We need file version 2.1 for the new eviction algorithm.
1351 if (new_eviction_)
1352 header.version = 0x20001;
1353
1354 header.create_time = Time::Now().ToInternalValue();
1355
1356 if (!file->Write(&header, sizeof(header), 0))
1357 return false;
1358
1359 return file->SetLength(GetIndexSize(header.table_len));
1360 }
1361
InitBackingStore(bool * file_created)1362 bool BackendImpl::InitBackingStore(bool* file_created) {
1363 file_util::CreateDirectory(path_);
1364
1365 FilePath index_name = path_.AppendASCII(kIndexName);
1366
1367 int flags = base::PLATFORM_FILE_READ |
1368 base::PLATFORM_FILE_WRITE |
1369 base::PLATFORM_FILE_OPEN_ALWAYS |
1370 base::PLATFORM_FILE_EXCLUSIVE_WRITE;
1371 scoped_refptr<disk_cache::File> file(new disk_cache::File(
1372 base::CreatePlatformFile(index_name, flags, file_created, NULL)));
1373
1374 if (!file->IsValid())
1375 return false;
1376
1377 bool ret = true;
1378 if (*file_created)
1379 ret = CreateBackingStore(file);
1380
1381 file = NULL;
1382 if (!ret)
1383 return false;
1384
1385 index_ = new MappedFile();
1386 data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
1387 if (!data_) {
1388 LOG(ERROR) << "Unable to map Index file";
1389 return false;
1390 }
1391
1392 if (index_->GetLength() < sizeof(Index)) {
1393 // We verify this again on CheckIndex() but it's easier to make sure now
1394 // that the header is there.
1395 LOG(ERROR) << "Corrupt Index file";
1396 return false;
1397 }
1398
1399 return true;
1400 }
1401
1402 // The maximum cache size will be either set explicitly by the caller, or
1403 // calculated by this code.
AdjustMaxCacheSize(int table_len)1404 void BackendImpl::AdjustMaxCacheSize(int table_len) {
1405 if (max_size_)
1406 return;
1407
1408 // If table_len is provided, the index file exists.
1409 DCHECK(!table_len || data_->header.magic);
1410
1411 // The user is not setting the size, let's figure it out.
1412 #ifdef ANDROID
1413 int64 available = 10 * 1024 * 1024; // 10 MB
1414 #else
1415 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
1416 #endif
1417 if (available < 0) {
1418 max_size_ = kDefaultCacheSize;
1419 return;
1420 }
1421
1422 if (table_len)
1423 available += data_->header.num_bytes;
1424
1425 max_size_ = PreferedCacheSize(available);
1426
1427 // Let's not use more than the default size while we tune-up the performance
1428 // of bigger caches. TODO(rvargas): remove this limit.
1429 if (max_size_ > kDefaultCacheSize * 4)
1430 max_size_ = kDefaultCacheSize * 4;
1431
1432 if (!table_len)
1433 return;
1434
1435 // If we already have a table, adjust the size to it.
1436 int current_max_size = MaxStorageSizeForTable(table_len);
1437 if (max_size_ > current_max_size)
1438 max_size_= current_max_size;
1439 }
1440
RestartCache(bool failure)1441 void BackendImpl::RestartCache(bool failure) {
1442 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
1443 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
1444 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
1445 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
1446
1447 PrepareForRestart();
1448 if (failure) {
1449 DCHECK(!num_refs_);
1450 DCHECK(!open_entries_.size());
1451 DelayedCacheCleanup(path_);
1452 } else {
1453 DeleteCache(path_, false);
1454 }
1455
1456 // Don't call Init() if directed by the unit test: we are simulating a failure
1457 // trying to re-enable the cache.
1458 if (unit_test_)
1459 init_ = true; // Let the destructor do proper cleanup.
1460 else if (SyncInit() == net::OK) {
1461 stats_.SetCounter(Stats::FATAL_ERROR, errors);
1462 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
1463 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
1464 stats_.SetCounter(Stats::LAST_REPORT, last_report);
1465 }
1466 }
1467
PrepareForRestart()1468 void BackendImpl::PrepareForRestart() {
1469 // Reset the mask_ if it was not given by the user.
1470 if (!(user_flags_ & kMask))
1471 mask_ = 0;
1472
1473 if (!(user_flags_ & kNewEviction))
1474 new_eviction_ = false;
1475
1476 disabled_ = true;
1477 data_->header.crash = 0;
1478 index_ = NULL;
1479 data_ = NULL;
1480 block_files_.CloseFiles();
1481 rankings_.Reset();
1482 init_ = false;
1483 restarted_ = true;
1484 }
1485
NewEntry(Addr address,EntryImpl ** entry)1486 int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
1487 EntriesMap::iterator it = open_entries_.find(address.value());
1488 if (it != open_entries_.end()) {
1489 // Easy job. This entry is already in memory.
1490 EntryImpl* this_entry = it->second;
1491 this_entry->AddRef();
1492 *entry = this_entry;
1493 return 0;
1494 }
1495
1496 scoped_refptr<EntryImpl> cache_entry(
1497 new EntryImpl(this, address, read_only_));
1498 IncreaseNumRefs();
1499 *entry = NULL;
1500
1501 if (!address.is_initialized() || address.is_separate_file() ||
1502 address.file_type() != BLOCK_256) {
1503 LOG(WARNING) << "Wrong entry address.";
1504 return ERR_INVALID_ADDRESS;
1505 }
1506
1507 TimeTicks start = TimeTicks::Now();
1508 if (!cache_entry->entry()->Load())
1509 return ERR_READ_FAILURE;
1510
1511 if (IsLoaded()) {
1512 CACHE_UMA(AGE_MS, "LoadTime", GetSizeGroup(), start);
1513 }
1514
1515 if (!cache_entry->SanityCheck()) {
1516 LOG(WARNING) << "Messed up entry found.";
1517 return ERR_INVALID_ENTRY;
1518 }
1519
1520 if (!cache_entry->LoadNodeAddress())
1521 return ERR_READ_FAILURE;
1522
1523 // Prevent overwriting the dirty flag on the destructor.
1524 cache_entry->SetDirtyFlag(GetCurrentEntryId());
1525
1526 if (!rankings_.SanityCheck(cache_entry->rankings(), false))
1527 return ERR_INVALID_LINKS;
1528
1529 if (cache_entry->dirty()) {
1530 Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
1531 address.value());
1532 }
1533
1534 open_entries_[address.value()] = cache_entry;
1535
1536 cache_entry->BeginLogging(net_log_, false);
1537 cache_entry.swap(entry);
1538 return 0;
1539 }
1540
MatchEntry(const std::string & key,uint32 hash,bool find_parent,Addr entry_addr,bool * match_error)1541 EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
1542 bool find_parent, Addr entry_addr,
1543 bool* match_error) {
1544 Addr address(data_->table[hash & mask_]);
1545 scoped_refptr<EntryImpl> cache_entry, parent_entry;
1546 EntryImpl* tmp = NULL;
1547 bool found = false;
1548 std::set<CacheAddr> visited;
1549 *match_error = false;
1550
1551 for (;;) {
1552 if (disabled_)
1553 break;
1554
1555 if (visited.find(address.value()) != visited.end()) {
1556 // It's possible for a buggy version of the code to write a loop. Just
1557 // break it.
1558 Trace("Hash collision loop 0x%x", address.value());
1559 address.set_value(0);
1560 parent_entry->SetNextAddress(address);
1561 }
1562 visited.insert(address.value());
1563
1564 if (!address.is_initialized()) {
1565 if (find_parent)
1566 found = true;
1567 break;
1568 }
1569
1570 int error = NewEntry(address, &tmp);
1571 cache_entry.swap(&tmp);
1572
1573 if (error || cache_entry->dirty()) {
1574 // This entry is dirty on disk (it was not properly closed): we cannot
1575 // trust it.
1576 Addr child(0);
1577 if (!error)
1578 child.set_value(cache_entry->GetNextAddress());
1579
1580 if (parent_entry) {
1581 parent_entry->SetNextAddress(child);
1582 parent_entry = NULL;
1583 } else {
1584 data_->table[hash & mask_] = child.value();
1585 }
1586
1587 Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
1588 address.value());
1589
1590 if (!error) {
1591 // It is important to call DestroyInvalidEntry after removing this
1592 // entry from the table.
1593 DestroyInvalidEntry(cache_entry);
1594 cache_entry = NULL;
1595 } else {
1596 Trace("NewEntry failed on MatchEntry 0x%x", address.value());
1597 }
1598
1599 // Restart the search.
1600 address.set_value(data_->table[hash & mask_]);
1601 visited.clear();
1602 continue;
1603 }
1604
1605 DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
1606 if (cache_entry->IsSameEntry(key, hash)) {
1607 if (!cache_entry->Update())
1608 cache_entry = NULL;
1609 found = true;
1610 if (find_parent && entry_addr.value() != address.value()) {
1611 Trace("Entry not on the index 0x%x", address.value());
1612 *match_error = true;
1613 parent_entry = NULL;
1614 }
1615 break;
1616 }
1617 if (!cache_entry->Update())
1618 cache_entry = NULL;
1619 parent_entry = cache_entry;
1620 cache_entry = NULL;
1621 if (!parent_entry)
1622 break;
1623
1624 address.set_value(parent_entry->GetNextAddress());
1625 }
1626
1627 if (parent_entry && (!find_parent || !found))
1628 parent_entry = NULL;
1629
1630 if (find_parent && entry_addr.is_initialized() && !cache_entry) {
1631 *match_error = true;
1632 parent_entry = NULL;
1633 }
1634
1635 if (cache_entry && (find_parent || !found))
1636 cache_entry = NULL;
1637
1638 find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
1639 return tmp;
1640 }
1641
1642 // This is the actual implementation for OpenNextEntry and OpenPrevEntry.
OpenFollowingEntry(bool forward,void ** iter)1643 EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
1644 if (disabled_)
1645 return NULL;
1646
1647 DCHECK(iter);
1648
1649 const int kListsToSearch = 3;
1650 scoped_refptr<EntryImpl> entries[kListsToSearch];
1651 scoped_ptr<Rankings::Iterator> iterator(
1652 reinterpret_cast<Rankings::Iterator*>(*iter));
1653 *iter = NULL;
1654
1655 if (!iterator.get()) {
1656 iterator.reset(new Rankings::Iterator(&rankings_));
1657 bool ret = false;
1658
1659 // Get an entry from each list.
1660 for (int i = 0; i < kListsToSearch; i++) {
1661 EntryImpl* temp = NULL;
1662 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
1663 &iterator->nodes[i], &temp);
1664 entries[i].swap(&temp); // The entry was already addref'd.
1665 }
1666 if (!ret)
1667 return NULL;
1668 } else {
1669 // Get the next entry from the last list, and the actual entries for the
1670 // elements on the other lists.
1671 for (int i = 0; i < kListsToSearch; i++) {
1672 EntryImpl* temp = NULL;
1673 if (iterator->list == i) {
1674 OpenFollowingEntryFromList(forward, iterator->list,
1675 &iterator->nodes[i], &temp);
1676 } else {
1677 temp = GetEnumeratedEntry(iterator->nodes[i]);
1678 }
1679
1680 entries[i].swap(&temp); // The entry was already addref'd.
1681 }
1682 }
1683
1684 int newest = -1;
1685 int oldest = -1;
1686 Time access_times[kListsToSearch];
1687 for (int i = 0; i < kListsToSearch; i++) {
1688 if (entries[i].get()) {
1689 access_times[i] = entries[i]->GetLastUsed();
1690 if (newest < 0) {
1691 DCHECK_LT(oldest, 0);
1692 newest = oldest = i;
1693 continue;
1694 }
1695 if (access_times[i] > access_times[newest])
1696 newest = i;
1697 if (access_times[i] < access_times[oldest])
1698 oldest = i;
1699 }
1700 }
1701
1702 if (newest < 0 || oldest < 0)
1703 return NULL;
1704
1705 EntryImpl* next_entry;
1706 if (forward) {
1707 next_entry = entries[newest].release();
1708 iterator->list = static_cast<Rankings::List>(newest);
1709 } else {
1710 next_entry = entries[oldest].release();
1711 iterator->list = static_cast<Rankings::List>(oldest);
1712 }
1713
1714 *iter = iterator.release();
1715 return next_entry;
1716 }
1717
bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = forward ?
      rankings_.GetNext(rankings.get(), list) :
      rankings_.GetPrev(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get());
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  if (NewEntry(Addr(next->Data()->contents), &entry)) {
    // TODO(rvargas) bug 73102: We should remove this node from the list, and
    // maybe do a better cleanup.
    return NULL;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry);
    entry->Release();
    return NULL;
  }

  if (!entry->Update()) {
    entry->Release();
    return NULL;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point, and
  // the regular path will work with the real entry. With time, this problem
  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

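  // The cache could not be restarted while there were outstanding references;
  // now that the last one is gone, schedule the restart.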
  if (!num_refs_ && disabled_)
    MessageLoop::current()->PostTask(FROM_HERE,
        factory_.NewRunnableMethod(&BackendImpl::RestartCache, true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

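  // The stats timer fires every 30 seconds, so 120 ticks amount to one hour.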
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up-to-date client that will report FirstEviction() data. After
  // that event, start reporting the following:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", data_->header.experiment,
            stats_.GetHitRatio());

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
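    // lru.sizes[] holds the number of entries on each ranking list; judging
    // by the histogram names, index 4 tracks the deleted entries.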
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", data_->header.experiment,
              stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", data_->header.experiment,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

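// The index file version is encoded as (major << 16) | minor, so 0x20000
// means 2.0 and 0x20001 means 2.1.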
void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

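  // kBaseTableLen is a power of two, so the bitwise test below rejects table
  // lengths that are not a multiple of it.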
  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_array<char> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}

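// Returns the number of dirty entries found, or a (negative) error code if
// the cache contents are inconsistent.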
int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (int i = 0; i <= static_cast<int>(mask_); i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret)
        return ret;
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  RankingsNode* rankings = cache_entry->rankings()->Data();
  return ok && !rankings->dummy;
}

int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
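    // For example, 1 GB of RAM yields about 21 MB of buffers; with roughly
    // 1.5 GB or more the value is clamped to the 30 MB cap below.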
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache