1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "net/disk_cache/blockfile/backend_impl.h"
6 
7 #include <limits>
8 #include <memory>
9 #include <utility>
10 
11 #include "base/files/file.h"
12 #include "base/files/file_path.h"
13 #include "base/files/file_util.h"
14 #include "base/functional/bind.h"
15 #include "base/functional/callback_helpers.h"
16 #include "base/hash/hash.h"
17 #include "base/lazy_instance.h"
18 #include "base/location.h"
19 #include "base/message_loop/message_pump_type.h"
20 #include "base/metrics/field_trial.h"
21 #include "base/metrics/histogram.h"
22 #include "base/rand_util.h"
23 #include "base/strings/string_number_conversions.h"
24 #include "base/strings/string_util.h"
25 #include "base/strings/stringprintf.h"
26 #include "base/synchronization/waitable_event.h"
27 #include "base/system/sys_info.h"
28 #include "base/task/sequenced_task_runner.h"
29 #include "base/task/single_thread_task_runner.h"
30 #include "base/threading/thread.h"
31 #include "base/threading/thread_restrictions.h"
32 #include "base/time/time.h"
33 #include "base/timer/timer.h"
34 #include "net/base/net_errors.h"
35 #include "net/base/tracing.h"
36 #include "net/disk_cache/backend_cleanup_tracker.h"
37 #include "net/disk_cache/blockfile/disk_format.h"
38 #include "net/disk_cache/blockfile/entry_impl.h"
39 #include "net/disk_cache/blockfile/errors.h"
40 #include "net/disk_cache/blockfile/experiments.h"
41 #include "net/disk_cache/blockfile/file.h"
42 #include "net/disk_cache/blockfile/histogram_macros.h"
43 #include "net/disk_cache/cache_util.h"
44 
45 // Provide a BackendImpl object to macros from histogram_macros.h.
46 #define CACHE_UMA_BACKEND_IMPL_OBJ this
47 
48 using base::Time;
49 using base::TimeTicks;
50 
51 namespace {
52 
53 const char kIndexName[] = "index";
54 
55 // Roughly ~240 MB corresponds to fewer than 50k entries for 99% of users.
56 // Note that the actual target is to keep the index table load factor under 55%
57 // for most users.
58 const int k64kEntriesStore = 240 * 1000 * 1000;
59 const int kBaseTableLen = 64 * 1024;
60 
61 // Avoid trimming the cache for the first 5 minutes (10 timer ticks).
62 const int kTrimDelay = 10;
63 
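// Maps the configured storage size to an index table length. A quick worked
// example from the constants above: up to ~240 MB of storage keeps the base
// 64K-slot table, ~480 MB uses 128K slots, and so on, capping at 16x (1M
// slots, i.e. the ~4 MB table mentioned below) for the largest int32_t sizes.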
64 int DesiredIndexTableLen(int32_t storage_size) {
65   if (storage_size <= k64kEntriesStore)
66     return kBaseTableLen;
67   if (storage_size <= k64kEntriesStore * 2)
68     return kBaseTableLen * 2;
69   if (storage_size <= k64kEntriesStore * 4)
70     return kBaseTableLen * 4;
71   if (storage_size <= k64kEntriesStore * 8)
72     return kBaseTableLen * 8;
73 
74   // The biggest storage_size for int32_t requires a 4 MB table.
75   return kBaseTableLen * 16;
76 }
77 
78 int MaxStorageSizeForTable(int table_len) {
79   return table_len * (k64kEntriesStore / kBaseTableLen);
80 }
81 
82 size_t GetIndexSize(int table_len) {
83   size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
84   return sizeof(disk_cache::IndexHeader) + table_size;
85 }
86 
87 // ------------------------------------------------------------------------
88 
89 // Sets group for the current experiment. Returns false if the files should be
90 // discarded.
91 bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
92   if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
93       header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
94     // Discard current cache.
95     return false;
96   }
97 
98   if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
99           "ExperimentControl") {
100     if (cache_created) {
101       header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
102       return true;
103     }
104     return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
105   }
106 
107   header->experiment = disk_cache::NO_EXPERIMENT;
108   return true;
109 }
110 
111 // A callback to perform final cleanup on the background thread.
112 void FinalCleanupCallback(disk_cache::BackendImpl* backend,
113                           base::WaitableEvent* done) {
114   backend->CleanupCache();
115   done->Signal();
116 }
117 
118 class CacheThread : public base::Thread {
119  public:
120   CacheThread() : base::Thread("CacheThread_BlockFile") {
121     CHECK(
122         StartWithOptions(base::Thread::Options(base::MessagePumpType::IO, 0)));
123   }
124 
125   ~CacheThread() override {
126     // We don't expect to be deleted, but call Stop() in the destructor
127     // because the docs say we should.
128     Stop();
129   }
130 };
131 
132 static base::LazyInstance<CacheThread>::Leaky g_internal_cache_thread =
133     LAZY_INSTANCE_INITIALIZER;
134 
135 scoped_refptr<base::SingleThreadTaskRunner> InternalCacheThread() {
136   return g_internal_cache_thread.Get().task_runner();
137 }
138 
139 scoped_refptr<base::SingleThreadTaskRunner> FallbackToInternalIfNull(
140     const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread) {
141   return cache_thread ? cache_thread : InternalCacheThread();
142 }
143 
144 }  // namespace
145 
146 // ------------------------------------------------------------------------
147 
148 namespace disk_cache {
149 
150 BackendImpl::BackendImpl(
151     const base::FilePath& path,
152     scoped_refptr<BackendCleanupTracker> cleanup_tracker,
153     const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
154     net::CacheType cache_type,
155     net::NetLog* net_log)
156     : Backend(cache_type),
157       cleanup_tracker_(std::move(cleanup_tracker)),
158       background_queue_(this, FallbackToInternalIfNull(cache_thread)),
159       path_(path),
160       block_files_(path),
161       user_flags_(0),
162       net_log_(net_log) {
163   TRACE_EVENT0("disk_cache", "BackendImpl::BackendImpl");
164 }
165 
166 BackendImpl::BackendImpl(
167     const base::FilePath& path,
168     uint32_t mask,
169     const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
170     net::CacheType cache_type,
171     net::NetLog* net_log)
172     : Backend(cache_type),
173       background_queue_(this, FallbackToInternalIfNull(cache_thread)),
174       path_(path),
175       block_files_(path),
176       mask_(mask),
177       user_flags_(kMask),
178       net_log_(net_log) {
179   TRACE_EVENT0("disk_cache", "BackendImpl::BackendImpl");
180 }
181 
182 BackendImpl::~BackendImpl() {
183   TRACE_EVENT0("disk_cache", "BackendImpl::~BackendImpl");
184   if (user_flags_ & kNoRandom) {
185     // This is a unit test, so we want to be strict about not leaking entries
186     // and completing all the work.
187     background_queue_.WaitForPendingIO();
188   } else {
189     // This is most likely not a test, so we want to do as little work as
190     // possible at this time, at the price of leaving dirty entries behind.
191     background_queue_.DropPendingIO();
192   }
193 
194   if (background_queue_.BackgroundIsCurrentSequence()) {
195     // Unit tests may use the same sequence for everything.
196     CleanupCache();
197   } else {
198     // Signals the end of background work.
199     base::WaitableEvent done;
200 
201     background_queue_.background_thread()->PostTask(
202         FROM_HERE, base::BindOnce(&FinalCleanupCallback, base::Unretained(this),
203                                   base::Unretained(&done)));
204     // http://crbug.com/74623
205     base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_wait;
206     done.Wait();
207   }
208 }
209 
210 void BackendImpl::Init(CompletionOnceCallback callback) {
211   background_queue_.Init(std::move(callback));
212 }
213 
214 int BackendImpl::SyncInit() {
215   TRACE_EVENT0("disk_cache", "BackendImpl::SyncInit");
216 
217 #if defined(NET_BUILD_STRESS_CACHE)
218   // Start evictions right away.
219   up_ticks_ = kTrimDelay * 2;
220 #endif
221   DCHECK(!init_);
222   if (init_)
223     return net::ERR_FAILED;
224 
225   bool create_files = false;
226   if (!InitBackingStore(&create_files)) {
227     ReportError(ERR_STORAGE_ERROR);
228     return net::ERR_FAILED;
229   }
230 
231   num_refs_ = num_pending_io_ = max_refs_ = 0;
232   entry_count_ = byte_count_ = 0;
233 
234   bool should_create_timer = false;
235   if (!restarted_) {
236     buffer_bytes_ = 0;
237     should_create_timer = true;
238   }
239 
240   init_ = true;
241 
242   if (data_->header.experiment != NO_EXPERIMENT &&
243       GetCacheType() != net::DISK_CACHE) {
244     // No experiment for other caches.
245     return net::ERR_FAILED;
246   }
247 
248   if (!(user_flags_ & kNoRandom)) {
249     // The unit test controls directly what to test.
250     new_eviction_ = (GetCacheType() == net::DISK_CACHE);
251   }
252 
253   if (!CheckIndex()) {
254     ReportError(ERR_INIT_FAILED);
255     return net::ERR_FAILED;
256   }
257 
258   if (!restarted_ && (create_files || !data_->header.num_entries))
259     ReportError(ERR_CACHE_CREATED);
260 
261   if (!(user_flags_ & kNoRandom) && GetCacheType() == net::DISK_CACHE &&
262       !InitExperiment(&data_->header, create_files)) {
263     return net::ERR_FAILED;
264   }
265 
266   // We don't care if the value overflows. The only thing we care about is that
267   // the id cannot be zero, because that value is used as "not dirty".
268   // Increasing the value once per second gives us many years before we start
269   // having collisions.
270   data_->header.this_id++;
271   if (!data_->header.this_id)
272     data_->header.this_id++;
273 
274   bool previous_crash = (data_->header.crash != 0);
275   data_->header.crash = 1;
276 
277   if (!block_files_.Init(create_files))
278     return net::ERR_FAILED;
279 
280   // We want to minimize the changes to cache for an AppCache.
281   if (GetCacheType() == net::APP_CACHE) {
282     DCHECK(!new_eviction_);
283     read_only_ = true;
284   } else if (GetCacheType() == net::SHADER_CACHE) {
285     DCHECK(!new_eviction_);
286   }
287 
288   eviction_.Init(this);
289 
290   // stats_ and rankings_ may end up calling back to us so we better be enabled.
291   disabled_ = false;
292   if (!InitStats())
293     return net::ERR_FAILED;
294 
295   disabled_ = !rankings_.Init(this, new_eviction_);
296 
297 #if defined(STRESS_CACHE_EXTENDED_VALIDATION)
298   trace_object_->EnableTracing(false);
299   int sc = SelfCheck();
300   if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
301     NOTREACHED();
302   trace_object_->EnableTracing(true);
303 #endif
304 
305   if (previous_crash) {
306     ReportError(ERR_PREVIOUS_CRASH);
307   } else if (!restarted_) {
308     ReportError(ERR_NO_ERROR);
309   }
310 
311   FlushIndex();
312 
313   if (!disabled_ && should_create_timer) {
314     // Create a recurrent timer of 30 secs.
315     DCHECK(background_queue_.BackgroundIsCurrentSequence());
316     int timer_delay = unit_test_ ? 1000 : 30000;
317     timer_ = std::make_unique<base::RepeatingTimer>();
318     timer_->Start(FROM_HERE, base::Milliseconds(timer_delay), this,
319                   &BackendImpl::OnStatsTimer);
320   }
321 
322   return disabled_ ? net::ERR_FAILED : net::OK;
323 }
324 
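// Final teardown, run on the background (cache) sequence: persists stats,
// clears the "crash" marker in the index header, waits for or drops pending
// file IO depending on kNoRandom (unit-test mode), then closes the block
// files and unmaps the index.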
325 void BackendImpl::CleanupCache() {
326   DCHECK(background_queue_.BackgroundIsCurrentSequence());
327   TRACE_EVENT0("disk_cache", "BackendImpl::CleanupCache");
328 
329   eviction_.Stop();
330   timer_.reset();
331 
332   if (init_) {
333     StoreStats();
334     if (data_)
335       data_->header.crash = 0;
336 
337     if (user_flags_ & kNoRandom) {
338       // This is a net_unittest, verify that we are not 'leaking' entries.
339       // TODO(https://crbug.com/1184679): Refactor this and eliminate the
340       //    WaitForPendingIOForTesting API.
341       File::WaitForPendingIOForTesting(&num_pending_io_);
342       DCHECK(!num_refs_);
343     } else {
344       File::DropPendingIO();
345     }
346   }
347   block_files_.CloseFiles();
348   FlushIndex();
349   index_ = nullptr;
350   ptr_factory_.InvalidateWeakPtrs();
351 }
352 
353 // ------------------------------------------------------------------------
354 
355 int BackendImpl::SyncOpenEntry(const std::string& key,
356                                scoped_refptr<EntryImpl>* entry) {
357   DCHECK(entry);
358   *entry = OpenEntryImpl(key);
359   return (*entry) ? net::OK : net::ERR_FAILED;
360 }
361 
362 int BackendImpl::SyncCreateEntry(const std::string& key,
363                                  scoped_refptr<EntryImpl>* entry) {
364   DCHECK(entry);
365   *entry = CreateEntryImpl(key);
366   return (*entry) ? net::OK : net::ERR_FAILED;
367 }
368 
369 int BackendImpl::SyncDoomEntry(const std::string& key) {
370   if (disabled_)
371     return net::ERR_FAILED;
372 
373   scoped_refptr<EntryImpl> entry = OpenEntryImpl(key);
374   if (!entry)
375     return net::ERR_FAILED;
376 
377   entry->DoomImpl();
378   return net::OK;
379 }
380 
381 int BackendImpl::SyncDoomAllEntries() {
382   if (disabled_)
383     return net::ERR_FAILED;
384 
385   // This is not really an error, but it is an interesting condition.
386   ReportError(ERR_CACHE_DOOMED);
387   stats_.OnEvent(Stats::DOOM_CACHE);
388   if (!num_refs_) {
389     RestartCache(false);
390     return disabled_ ? net::ERR_FAILED : net::OK;
391   } else {
392     if (disabled_)
393       return net::ERR_FAILED;
394 
395     eviction_.TrimCache(true);
396     return net::OK;
397   }
398 }
399 
400 int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
401                                         const base::Time end_time) {
402   TRACE_EVENT0("disk_cache", "BackendImpl::SyncDoomEntriesBetween");
403 
404   DCHECK_NE(net::APP_CACHE, GetCacheType());
405   if (end_time.is_null())
406     return SyncDoomEntriesSince(initial_time);
407 
408   DCHECK(end_time >= initial_time);
409 
410   if (disabled_)
411     return net::ERR_FAILED;
412 
413   scoped_refptr<EntryImpl> node;
414   auto iterator = std::make_unique<Rankings::Iterator>();
415   scoped_refptr<EntryImpl> next = OpenNextEntryImpl(iterator.get());
416   if (!next)
417     return net::OK;
418 
419   while (next) {
420     node = std::move(next);
421     next = OpenNextEntryImpl(iterator.get());
422 
423     if (node->GetLastUsed() >= initial_time &&
424         node->GetLastUsed() < end_time) {
425       node->DoomImpl();
426     } else if (node->GetLastUsed() < initial_time) {
427       next = nullptr;
428       SyncEndEnumeration(std::move(iterator));
429     }
430   }
431 
432   return net::OK;
433 }
434 
435 int BackendImpl::SyncCalculateSizeOfAllEntries() {
436   TRACE_EVENT0("disk_cache", "BackendImpl::SyncCalculateSizeOfAllEntries");
437 
438   DCHECK_NE(net::APP_CACHE, GetCacheType());
439   if (disabled_)
440     return net::ERR_FAILED;
441 
442   return data_->header.num_bytes;
443 }
444 
445 // We use OpenNextEntryImpl to retrieve elements from the cache, until we get
446 // entries that are too old.
447 int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
448   TRACE_EVENT0("disk_cache", "BackendImpl::SyncDoomEntriesSince");
449 
450   DCHECK_NE(net::APP_CACHE, GetCacheType());
451   if (disabled_)
452     return net::ERR_FAILED;
453 
454   stats_.OnEvent(Stats::DOOM_RECENT);
455   for (;;) {
456     auto iterator = std::make_unique<Rankings::Iterator>();
457     scoped_refptr<EntryImpl> entry = OpenNextEntryImpl(iterator.get());
458     if (!entry)
459       return net::OK;
460 
461     if (initial_time > entry->GetLastUsed()) {
462       entry = nullptr;
463       SyncEndEnumeration(std::move(iterator));
464       return net::OK;
465     }
466 
467     entry->DoomImpl();
468     entry = nullptr;
469     SyncEndEnumeration(
470         std::move(iterator));  // The doom invalidated the iterator.
471   }
472 }
473 
474 int BackendImpl::SyncOpenNextEntry(Rankings::Iterator* iterator,
475                                    scoped_refptr<EntryImpl>* next_entry) {
476   TRACE_EVENT0("disk_cache", "BackendImpl::SyncOpenNextEntry");
477 
478   *next_entry = OpenNextEntryImpl(iterator);
479   return (*next_entry) ? net::OK : net::ERR_FAILED;
480 }
481 
482 void BackendImpl::SyncEndEnumeration(
483     std::unique_ptr<Rankings::Iterator> iterator) {
484   iterator->Reset();
485 }
486 
487 void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
488   if (disabled_)
489     return;
490 
491   uint32_t hash = base::PersistentHash(key);
492   bool error;
493   scoped_refptr<EntryImpl> cache_entry =
494       MatchEntry(key, hash, false, Addr(), &error);
495   if (cache_entry && ENTRY_NORMAL == cache_entry->entry()->Data()->state)
496     UpdateRank(cache_entry.get(), GetCacheType() == net::SHADER_CACHE);
497 }
498 
499 scoped_refptr<EntryImpl> BackendImpl::OpenEntryImpl(const std::string& key) {
500   TRACE_EVENT0("disk_cache", "BackendImpl::OpenEntryImpl");
501 
502   if (disabled_)
503     return nullptr;
504 
505   TimeTicks start = TimeTicks::Now();
506   uint32_t hash = base::PersistentHash(key);
507 
508   bool error;
509   scoped_refptr<EntryImpl> cache_entry =
510       MatchEntry(key, hash, false, Addr(), &error);
511   if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
512     // The entry was already evicted.
513     cache_entry = nullptr;
514   }
515 
516   int64_t current_size = data_->header.num_bytes / (1024 * 1024);
517   int64_t total_hours = stats_.GetCounter(Stats::TIMER) / 120;
518   int64_t no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
519   int64_t use_hours = total_hours - no_use_hours;
520 
521   if (!cache_entry) {
522     stats_.OnEvent(Stats::OPEN_MISS);
523     return nullptr;
524   }
525 
526   eviction_.OnOpenEntry(cache_entry.get());
527   entry_count_++;
528 
529   CACHE_UMA(AGE_MS, "OpenTime", 0, start);
530   CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
531   CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0,
532             static_cast<base::HistogramBase::Sample>(total_hours));
533   CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0,
534             static_cast<base::HistogramBase::Sample>(use_hours));
535   stats_.OnEvent(Stats::OPEN_HIT);
536   return cache_entry;
537 }
538 
539 scoped_refptr<EntryImpl> BackendImpl::CreateEntryImpl(const std::string& key) {
540   TRACE_EVENT0("disk_cache", "BackendImpl::CreateEntryImpl");
541 
542   if (disabled_ || key.empty())
543     return nullptr;
544 
545   TimeTicks start = TimeTicks::Now();
546   uint32_t hash = base::PersistentHash(key);
547 
548   scoped_refptr<EntryImpl> parent;
549   Addr entry_address(data_->table[hash & mask_]);
550   if (entry_address.is_initialized()) {
551     // We have an entry already. It could be the one we are looking for, or just
552     // a hash conflict.
553     bool error;
554     scoped_refptr<EntryImpl> old_entry =
555         MatchEntry(key, hash, false, Addr(), &error);
556     if (old_entry)
557       return ResurrectEntry(std::move(old_entry));
558 
559     parent = MatchEntry(key, hash, true, Addr(), &error);
560     DCHECK(!error);
561     if (!parent && data_->table[hash & mask_]) {
562       // We should have corrected the problem.
563       NOTREACHED();
564       return nullptr;
565     }
566   }
567 
568   // The general flow is to allocate disk space and initialize the entry data,
569   // followed by saving that to disk, then linking the entry through the index
570   // and finally through the lists. If there is a crash in this process, we may
571   // end up with:
572   // a. Used, unreferenced empty blocks on disk (basically just garbage).
573   // b. Used, unreferenced but meaningful data on disk (more garbage).
574   // c. A fully formed entry, reachable only through the index.
575   // d. A fully formed entry, also reachable through the lists, but still dirty.
576   //
577   // Anything after (b) can be automatically cleaned up. We may consider saving
578   // the current operation (as we do while manipulating the lists) so that we
579   // can detect and clean up (a) and (b).
580 
581   int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
582   if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
583     LOG(ERROR) << "Create entry failed " << key.c_str();
584     stats_.OnEvent(Stats::CREATE_ERROR);
585     return nullptr;
586   }
587 
588   Addr node_address(0);
589   if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
590     block_files_.DeleteBlock(entry_address, false);
591     LOG(ERROR) << "Create entry failed " << key.c_str();
592     stats_.OnEvent(Stats::CREATE_ERROR);
593     return nullptr;
594   }
595 
596   auto cache_entry =
597       base::MakeRefCounted<EntryImpl>(this, entry_address, false);
598   IncreaseNumRefs();
599 
600   if (!cache_entry->CreateEntry(node_address, key, hash)) {
601     block_files_.DeleteBlock(entry_address, false);
602     block_files_.DeleteBlock(node_address, false);
603     LOG(ERROR) << "Create entry failed " << key.c_str();
604     stats_.OnEvent(Stats::CREATE_ERROR);
605     return nullptr;
606   }
607 
608   cache_entry->BeginLogging(net_log_, true);
609 
610   // We are not failing the operation; let's add this to the map.
611   open_entries_[entry_address.value()] = cache_entry.get();
612 
613   // Save the entry.
614   cache_entry->entry()->Store();
615   cache_entry->rankings()->Store();
616   IncreaseNumEntries();
617   entry_count_++;
618 
619   // Link this entry through the index.
620   if (parent.get()) {
621     parent->SetNextAddress(entry_address);
622   } else {
623     data_->table[hash & mask_] = entry_address.value();
624   }
625 
626   // Link this entry through the lists.
627   eviction_.OnCreateEntry(cache_entry.get());
628 
629   CACHE_UMA(AGE_MS, "CreateTime", 0, start);
630   stats_.OnEvent(Stats::CREATE_HIT);
631   FlushIndex();
632   return cache_entry;
633 }
634 
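// Enumeration step. The iterator keeps one cursor per rankings list; each
// call advances the list the previous result came from and then returns the
// most recently used entry among the current candidates, so entries come
// back in roughly most-recently-used order across the lists.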
635 scoped_refptr<EntryImpl> BackendImpl::OpenNextEntryImpl(
636     Rankings::Iterator* iterator) {
637   if (disabled_)
638     return nullptr;
639 
640   const int kListsToSearch = 3;
641   scoped_refptr<EntryImpl> entries[kListsToSearch];
642   if (!iterator->my_rankings) {
643     iterator->my_rankings = &rankings_;
644     bool ret = false;
645 
646     // Get an entry from each list.
647     for (int i = 0; i < kListsToSearch; i++) {
648       ret |= OpenFollowingEntryFromList(static_cast<Rankings::List>(i),
649                                         &iterator->nodes[i], &entries[i]);
650     }
651     if (!ret) {
652       iterator->Reset();
653       return nullptr;
654     }
655   } else {
656     // Get the next entry from the last list, and the actual entries for the
657     // elements on the other lists.
658     for (int i = 0; i < kListsToSearch; i++) {
659       if (iterator->list == i) {
660         OpenFollowingEntryFromList(iterator->list, &iterator->nodes[i],
661                                    &entries[i]);
662       } else {
663         entries[i] = GetEnumeratedEntry(iterator->nodes[i],
664                                         static_cast<Rankings::List>(i));
665       }
666     }
667   }
668 
669   int newest = -1;
670   int oldest = -1;
671   Time access_times[kListsToSearch];
672   for (int i = 0; i < kListsToSearch; i++) {
673     if (entries[i].get()) {
674       access_times[i] = entries[i]->GetLastUsed();
675       if (newest < 0) {
676         DCHECK_LT(oldest, 0);
677         newest = oldest = i;
678         continue;
679       }
680       if (access_times[i] > access_times[newest])
681         newest = i;
682       if (access_times[i] < access_times[oldest])
683         oldest = i;
684     }
685   }
686 
687   if (newest < 0 || oldest < 0) {
688     iterator->Reset();
689     return nullptr;
690   }
691 
692   scoped_refptr<EntryImpl> next_entry = entries[newest];
693   iterator->list = static_cast<Rankings::List>(newest);
694   return next_entry;
695 }
696 
697 bool BackendImpl::SetMaxSize(int64_t max_bytes) {
698   if (max_bytes < 0 || max_bytes > std::numeric_limits<int>::max())
699     return false;
700 
701   // Zero size means use the default.
702   if (!max_bytes)
703     return true;
704 
705   // Avoid a DCHECK later on.
706   if (max_bytes >= std::numeric_limits<int32_t>::max() -
707                        std::numeric_limits<int32_t>::max() / 10) {
708     max_bytes = std::numeric_limits<int32_t>::max() -
709                 std::numeric_limits<int32_t>::max() / 10 - 1;
710   }
711 
712   user_flags_ |= kMaxSize;
713   max_size_ = max_bytes;
714   return true;
715 }
716 
717 base::FilePath BackendImpl::GetFileName(Addr address) const {
718   if (!address.is_separate_file() || !address.is_initialized()) {
719     NOTREACHED();
720     return base::FilePath();
721   }
722 
723   std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
724   return path_.AppendASCII(tmp);
725 }
726 
727 MappedFile* BackendImpl::File(Addr address) {
728   if (disabled_)
729     return nullptr;
730   return block_files_.GetFile(address);
731 }
732 
733 base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
734   return background_queue_.GetWeakPtr();
735 }
736 
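// Picks the next unused external file number (wrapping if SetFileNumber
// fails), creates the corresponding f_xxxxxx file exclusively, and records
// the number in the header's last_file field on success.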
737 bool BackendImpl::CreateExternalFile(Addr* address) {
738   TRACE_EVENT0("disk_cache", "BackendImpl::CreateExternalFile");
739   int file_number = data_->header.last_file + 1;
740   Addr file_address(0);
741   bool success = false;
742   for (int i = 0; i < 0x0fffffff; i++, file_number++) {
743     if (!file_address.SetFileNumber(file_number)) {
744       file_number = 1;
745       continue;
746     }
747     base::FilePath name = GetFileName(file_address);
748     int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
749                 base::File::FLAG_CREATE | base::File::FLAG_WIN_EXCLUSIVE_WRITE;
750     base::File file(name, flags);
751     if (!file.IsValid()) {
752       base::File::Error error = file.error_details();
753       if (error != base::File::FILE_ERROR_EXISTS) {
754         LOG(ERROR) << "Unable to create file: " << error;
755         return false;
756       }
757       continue;
758     }
759 
760     success = true;
761     break;
762   }
763 
764   DCHECK(success);
765   if (!success)
766     return false;
767 
768   data_->header.last_file = file_number;
769   address->set_value(file_address.value());
770   return true;
771 }
772 
773 bool BackendImpl::CreateBlock(FileType block_type, int block_count,
774                              Addr* block_address) {
775   return block_files_.CreateBlock(block_type, block_count, block_address);
776 }
777 
778 void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
779   block_files_.DeleteBlock(block_address, deep);
780 }
781 
782 LruData* BackendImpl::GetLruData() {
783   return &data_->header.lru;
784 }
785 
786 void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
787   if (read_only_ || (!modified && GetCacheType() == net::SHADER_CACHE))
788     return;
789   eviction_.UpdateRank(entry, modified);
790 }
791 
792 void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
793   Addr address(rankings->Data()->contents);
794   scoped_refptr<EntryImpl> cache_entry;
795   if (NewEntry(address, &cache_entry)) {
796     STRESS_NOTREACHED();
797     return;
798   }
799 
800   uint32_t hash = cache_entry->GetHash();
801   cache_entry = nullptr;
802 
803   // Anything on the table means that this entry is there.
804   if (data_->table[hash & mask_])
805     return;
806 
807   data_->table[hash & mask_] = address.value();
808   FlushIndex();
809 }
810 
811 void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
812   uint32_t hash = entry->GetHash();
813   std::string key = entry->GetKey();
814   Addr entry_addr = entry->entry()->address();
815   bool error;
816   scoped_refptr<EntryImpl> parent_entry =
817       MatchEntry(key, hash, true, entry_addr, &error);
818   CacheAddr child(entry->GetNextAddress());
819 
820   if (!entry->doomed()) {
821     // We may have doomed this entry from within MatchEntry.
822     eviction_.OnDoomEntry(entry);
823     entry->InternalDoom();
824     if (!new_eviction_) {
825       DecreaseNumEntries();
826     }
827     stats_.OnEvent(Stats::DOOM_ENTRY);
828   }
829 
830   if (parent_entry) {
831     parent_entry->SetNextAddress(Addr(child));
832     parent_entry = nullptr;
833   } else if (!error) {
834     data_->table[hash & mask_] = child;
835   }
836 
837   FlushIndex();
838 }
839 
840 #if defined(NET_BUILD_STRESS_CACHE)
841 
842 CacheAddr BackendImpl::GetNextAddr(Addr address) {
843   EntriesMap::iterator it = open_entries_.find(address.value());
844   if (it != open_entries_.end()) {
845     EntryImpl* this_entry = it->second;
846     return this_entry->GetNextAddress();
847   }
848   DCHECK(block_files_.IsValid(address));
849   DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);
850 
851   CacheEntryBlock entry(File(address), address);
852   CHECK(entry.Load());
853   return entry.Data()->next;
854 }
855 
856 void BackendImpl::NotLinked(EntryImpl* entry) {
857   Addr entry_addr = entry->entry()->address();
858   uint32_t i = entry->GetHash() & mask_;
859   Addr address(data_->table[i]);
860   if (!address.is_initialized())
861     return;
862 
863   for (;;) {
864     DCHECK(entry_addr.value() != address.value());
865     address.set_value(GetNextAddr(address));
866     if (!address.is_initialized())
867       break;
868   }
869 }
870 #endif  // NET_BUILD_STRESS_CACHE
871 
872 // An entry may be linked on the DELETED list for a while after being doomed.
873 // This function is called when we want to remove it.
874 void BackendImpl::RemoveEntry(EntryImpl* entry) {
875 #if defined(NET_BUILD_STRESS_CACHE)
876   NotLinked(entry);
877 #endif
878   if (!new_eviction_)
879     return;
880 
881   DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);
882 
883   eviction_.OnDestroyEntry(entry);
884   DecreaseNumEntries();
885 }
886 
887 void BackendImpl::OnEntryDestroyBegin(Addr address) {
888   auto it = open_entries_.find(address.value());
889   if (it != open_entries_.end())
890     open_entries_.erase(it);
891 }
892 
893 void BackendImpl::OnEntryDestroyEnd() {
894   DecreaseNumRefs();
895   consider_evicting_at_op_end_ = true;
896 }
897 
898 void BackendImpl::OnSyncBackendOpComplete() {
899   if (consider_evicting_at_op_end_) {
900     if (data_->header.num_bytes > max_size_ && !read_only_ &&
901         (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
902       eviction_.TrimCache(false);
903     consider_evicting_at_op_end_ = false;
904   }
905 }
906 
907 EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
908   DCHECK(rankings->HasData());
909   auto it = open_entries_.find(rankings->Data()->contents);
910   if (it != open_entries_.end()) {
911     // We have this entry in memory.
912     return it->second;
913   }
914 
915   return nullptr;
916 }
917 
918 int32_t BackendImpl::GetCurrentEntryId() const {
919   return data_->header.this_id;
920 }
921 
922 int64_t BackendImpl::MaxFileSize() const {
923   return GetCacheType() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
924 }
925 
926 void BackendImpl::ModifyStorageSize(int32_t old_size, int32_t new_size) {
927   if (disabled_ || old_size == new_size)
928     return;
929   if (old_size > new_size)
930     SubstractStorageSize(old_size - new_size);
931   else
932     AddStorageSize(new_size - old_size);
933 
934   FlushIndex();
935 
936   // Update the usage statistics.
937   stats_.ModifyStorageStats(old_size, new_size);
938 }
939 
940 void BackendImpl::TooMuchStorageRequested(int32_t size) {
941   stats_.ModifyStorageStats(0, size);
942 }
943 
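// Accounting for in-memory write buffers: an entry may grow its buffer by
// (new_size - current_size) bytes only if buffering is enabled and the
// backend-wide total stays under MaxBuffersSize().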
944 bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
945   DCHECK_GT(new_size, current_size);
946   if (user_flags_ & kNoBuffering)
947     return false;
948 
949   int to_add = new_size - current_size;
950   if (buffer_bytes_ + to_add > MaxBuffersSize())
951     return false;
952 
953   buffer_bytes_ += to_add;
954   CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
955   return true;
956 }
957 
958 void BackendImpl::BufferDeleted(int size) {
959   buffer_bytes_ -= size;
960   DCHECK_GE(size, 0);
961 }
962 
963 bool BackendImpl::IsLoaded() const {
964   CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
965   if (user_flags_ & kNoLoadProtection)
966     return false;
967 
968   return (num_pending_io_ > 5 || user_load_);
969 }
970 
971 std::string BackendImpl::HistogramName(const char* name, int experiment) const {
972   if (!experiment)
973     return base::StringPrintf("DiskCache.%d.%s", GetCacheType(), name);
974   return base::StringPrintf("DiskCache.%d.%s_%d", GetCacheType(), name,
975                             experiment);
976 }
977 
978 base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
979   return ptr_factory_.GetWeakPtr();
980 }
981 
982 // We want to remove biases from some histograms so we only send data once per
983 // week.
984 bool BackendImpl::ShouldReportAgain() {
985   if (uma_report_)
986     return uma_report_ == 2;
987 
988   uma_report_++;
989   int64_t last_report = stats_.GetCounter(Stats::LAST_REPORT);
990   Time last_time = Time::FromInternalValue(last_report);
991   if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
992     stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
993     uma_report_++;
994     return true;
995   }
996   return false;
997 }
998 
999 void BackendImpl::FirstEviction() {
1000   DCHECK(data_->header.create_time);
1001   if (!GetEntryCount())
1002     return;  // This is just for unit tests.
1003 
1004   Time create_time = Time::FromInternalValue(data_->header.create_time);
1005   CACHE_UMA(AGE, "FillupAge", 0, create_time);
1006 
1007   int64_t use_time = stats_.GetCounter(Stats::TIMER);
1008   CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
1009   CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());
1010 
1011   if (!use_time)
1012     use_time = 1;
1013   CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
1014             static_cast<int>(data_->header.num_entries / use_time));
1015   CACHE_UMA(COUNTS, "FirstByteIORate", 0,
1016             static_cast<int>((data_->header.num_bytes / 1024) / use_time));
1017 
1018   int avg_size = data_->header.num_bytes / GetEntryCount();
1019   CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);
1020 
1021   int large_entries_bytes = stats_.GetLargeEntriesSize();
1022   int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
1023   CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);
1024 
1025   if (new_eviction_) {
1026     CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
1027     CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
1028               data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
1029     CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
1030               data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
1031     CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
1032               data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
1033   }
1034 
1035   stats_.ResetRatios();
1036 }
1037 
1038 void BackendImpl::CriticalError(int error) {
1039   STRESS_NOTREACHED();
1040   LOG(ERROR) << "Critical error found " << error;
1041   if (disabled_)
1042     return;
1043 
1044   stats_.OnEvent(Stats::FATAL_ERROR);
1045   LogStats();
1046   ReportError(error);
1047 
1048   // Setting the index table length to an invalid value will force re-creation
1049   // of the cache files.
1050   data_->header.table_len = 1;
1051   disabled_ = true;
1052 
1053   if (!num_refs_)
1054     base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
1055         FROM_HERE,
1056         base::BindOnce(&BackendImpl::RestartCache, GetWeakPtr(), true));
1057 }
1058 
1059 void BackendImpl::ReportError(int error) {
1060   STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
1061                 error == ERR_CACHE_CREATED);
1062 
1063   // We transmit positive numbers, instead of direct error codes.
1064   DCHECK_LE(error, 0);
1065   CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
1066 }
1067 
1068 void BackendImpl::OnEvent(Stats::Counters an_event) {
1069   stats_.OnEvent(an_event);
1070 }
1071 
1072 void BackendImpl::OnRead(int32_t bytes) {
1073   DCHECK_GE(bytes, 0);
1074   byte_count_ += bytes;
1075   if (byte_count_ < 0)
1076     byte_count_ = std::numeric_limits<int32_t>::max();
1077 }
1078 
1079 void BackendImpl::OnWrite(int32_t bytes) {
1080   // We use the same implementation as OnRead... just log the number of bytes.
1081   OnRead(bytes);
1082 }
1083 
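// Periodic timer (every 30 seconds, or 1 second in unit tests; see
// SyncInit). Updates the sampled open-entry count and IO-rate statistics and
// persists the stats block every 10 ticks (about 5 minutes).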
1084 void BackendImpl::OnStatsTimer() {
1085   if (disabled_)
1086     return;
1087 
1088   stats_.OnEvent(Stats::TIMER);
1089   int64_t time = stats_.GetCounter(Stats::TIMER);
1090   int64_t current = stats_.GetCounter(Stats::OPEN_ENTRIES);
1091 
1092   // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
1093   // the bias towards 0.
1094   if (num_refs_ && (current != num_refs_)) {
1095     int64_t diff = (num_refs_ - current) / 50;
1096     if (!diff)
1097       diff = num_refs_ > current ? 1 : -1;
1098     current = current + diff;
1099     stats_.SetCounter(Stats::OPEN_ENTRIES, current);
1100     stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
1101   }
1102 
1103   CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);
1104 
1105   CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
1106   CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);
1107 
1108   // These values cover about 99.5% of the population (Oct 2011).
1109   user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
1110   entry_count_ = 0;
1111   byte_count_ = 0;
1112   up_ticks_++;
1113 
1114   if (!data_)
1115     first_timer_ = false;
1116   if (first_timer_) {
1117     first_timer_ = false;
1118     if (ShouldReportAgain())
1119       ReportStats();
1120   }
1121 
1122   // Save stats to disk at 5 min intervals.
1123   if (time % 10 == 0)
1124     StoreStats();
1125 }
1126 
1127 void BackendImpl::IncrementIoCount() {
1128   num_pending_io_++;
1129 }
1130 
1131 void BackendImpl::DecrementIoCount() {
1132   num_pending_io_--;
1133 }
1134 
1135 void BackendImpl::SetUnitTestMode() {
1136   user_flags_ |= kUnitTestMode;
1137   unit_test_ = true;
1138 }
1139 
1140 void BackendImpl::SetUpgradeMode() {
1141   user_flags_ |= kUpgradeMode;
1142   read_only_ = true;
1143 }
1144 
1145 void BackendImpl::SetNewEviction() {
1146   user_flags_ |= kNewEviction;
1147   new_eviction_ = true;
1148 }
1149 
1150 void BackendImpl::SetFlags(uint32_t flags) {
1151   user_flags_ |= flags;
1152 }
1153 
1154 void BackendImpl::ClearRefCountForTest() {
1155   num_refs_ = 0;
1156 }
1157 
1158 int BackendImpl::FlushQueueForTest(CompletionOnceCallback callback) {
1159   background_queue_.FlushQueue(std::move(callback));
1160   return net::ERR_IO_PENDING;
1161 }
1162 
1163 int BackendImpl::RunTaskForTest(base::OnceClosure task,
1164                                 CompletionOnceCallback callback) {
1165   background_queue_.RunTask(std::move(task), std::move(callback));
1166   return net::ERR_IO_PENDING;
1167 }
1168 
1169 void BackendImpl::TrimForTest(bool empty) {
1170   eviction_.SetTestMode();
1171   eviction_.TrimCache(empty);
1172 }
1173 
1174 void BackendImpl::TrimDeletedListForTest(bool empty) {
1175   eviction_.SetTestMode();
1176   eviction_.TrimDeletedList(empty);
1177 }
1178 
1179 base::RepeatingTimer* BackendImpl::GetTimerForTest() {
1180   return timer_.get();
1181 }
1182 
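// Full consistency check: verifies the rankings lists and the entry count
// recorded in the header, then checks every entry. Returns a negative error
// code on failure (stress builds keep going to gather more information).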
1183 int BackendImpl::SelfCheck() {
1184   if (!init_) {
1185     LOG(ERROR) << "Init failed";
1186     return ERR_INIT_FAILED;
1187   }
1188 
1189   int num_entries = rankings_.SelfCheck();
1190   if (num_entries < 0) {
1191     LOG(ERROR) << "Invalid rankings list, error " << num_entries;
1192 #if !defined(NET_BUILD_STRESS_CACHE)
1193     return num_entries;
1194 #endif
1195   }
1196 
1197   if (num_entries != data_->header.num_entries) {
1198     LOG(ERROR) << "Number of entries mismatch";
1199 #if !defined(NET_BUILD_STRESS_CACHE)
1200     return ERR_NUM_ENTRIES_MISMATCH;
1201 #endif
1202   }
1203 
1204   return CheckAllEntries();
1205 }
1206 
1207 void BackendImpl::FlushIndex() {
1208   if (index_.get() && !disabled_)
1209     index_->Flush();
1210 }
1211 
1212 // ------------------------------------------------------------------------
1213 
1214 int32_t BackendImpl::GetEntryCount() const {
1215   if (!index_.get() || disabled_)
1216     return 0;
1217   // num_entries includes entries already evicted.
1218   int32_t not_deleted =
1219       data_->header.num_entries - data_->header.lru.sizes[Rankings::DELETED];
1220 
1221   if (not_deleted < 0) {
1222     NOTREACHED();
1223     not_deleted = 0;
1224   }
1225 
1226   return not_deleted;
1227 }
1228 
1229 EntryResult BackendImpl::OpenOrCreateEntry(
1230     const std::string& key,
1231     net::RequestPriority request_priority,
1232     EntryResultCallback callback) {
1233   DCHECK(!callback.is_null());
1234   background_queue_.OpenOrCreateEntry(key, std::move(callback));
1235   return EntryResult::MakeError(net::ERR_IO_PENDING);
1236 }
1237 
1238 EntryResult BackendImpl::OpenEntry(const std::string& key,
1239                                    net::RequestPriority request_priority,
1240                                    EntryResultCallback callback) {
1241   DCHECK(!callback.is_null());
1242   background_queue_.OpenEntry(key, std::move(callback));
1243   return EntryResult::MakeError(net::ERR_IO_PENDING);
1244 }
1245 
1246 EntryResult BackendImpl::CreateEntry(const std::string& key,
1247                                      net::RequestPriority request_priority,
1248                                      EntryResultCallback callback) {
1249   DCHECK(!callback.is_null());
1250   background_queue_.CreateEntry(key, std::move(callback));
1251   return EntryResult::MakeError(net::ERR_IO_PENDING);
1252 }
1253 
1254 net::Error BackendImpl::DoomEntry(const std::string& key,
1255                                   net::RequestPriority priority,
1256                                   CompletionOnceCallback callback) {
1257   DCHECK(!callback.is_null());
1258   background_queue_.DoomEntry(key, std::move(callback));
1259   return net::ERR_IO_PENDING;
1260 }
1261 
1262 net::Error BackendImpl::DoomAllEntries(CompletionOnceCallback callback) {
1263   DCHECK(!callback.is_null());
1264   background_queue_.DoomAllEntries(std::move(callback));
1265   return net::ERR_IO_PENDING;
1266 }
1267 
1268 net::Error BackendImpl::DoomEntriesBetween(const base::Time initial_time,
1269                                            const base::Time end_time,
1270                                            CompletionOnceCallback callback) {
1271   DCHECK(!callback.is_null());
1272   background_queue_.DoomEntriesBetween(initial_time, end_time,
1273                                        std::move(callback));
1274   return net::ERR_IO_PENDING;
1275 }
1276 
1277 net::Error BackendImpl::DoomEntriesSince(const base::Time initial_time,
1278                                          CompletionOnceCallback callback) {
1279   DCHECK(!callback.is_null());
1280   background_queue_.DoomEntriesSince(initial_time, std::move(callback));
1281   return net::ERR_IO_PENDING;
1282 }
1283 
1284 int64_t BackendImpl::CalculateSizeOfAllEntries(
1285     Int64CompletionOnceCallback callback) {
1286   DCHECK(!callback.is_null());
1287   background_queue_.CalculateSizeOfAllEntries(BindOnce(
1288       [](Int64CompletionOnceCallback callback, int result) {
1289         std::move(callback).Run(static_cast<int64_t>(result));
1290       },
1291       std::move(callback)));
1292   return net::ERR_IO_PENDING;
1293 }
1294 
1295 class BackendImpl::IteratorImpl : public Backend::Iterator {
1296  public:
1297   explicit IteratorImpl(base::WeakPtr<InFlightBackendIO> background_queue)
1298       : background_queue_(background_queue),
1299         iterator_(std::make_unique<Rankings::Iterator>()) {}
1300 
1301   ~IteratorImpl() override {
1302     if (background_queue_)
1303       background_queue_->EndEnumeration(std::move(iterator_));
1304   }
1305 
1306   EntryResult OpenNextEntry(EntryResultCallback callback) override {
1307     if (!background_queue_)
1308       return EntryResult::MakeError(net::ERR_FAILED);
1309     background_queue_->OpenNextEntry(iterator_.get(), std::move(callback));
1310     return EntryResult::MakeError(net::ERR_IO_PENDING);
1311   }
1312 
1313  private:
1314   const base::WeakPtr<InFlightBackendIO> background_queue_;
1315   std::unique_ptr<Rankings::Iterator> iterator_;
1316 };
1317 
1318 std::unique_ptr<Backend::Iterator> BackendImpl::CreateIterator() {
1319   return std::make_unique<IteratorImpl>(GetBackgroundQueue());
1320 }
1321 
1322 void BackendImpl::GetStats(StatsItems* stats) {
1323   if (disabled_)
1324     return;
1325 
1326   std::pair<std::string, std::string> item;
1327 
1328   item.first = "Entries";
1329   item.second = base::NumberToString(data_->header.num_entries);
1330   stats->push_back(item);
1331 
1332   item.first = "Pending IO";
1333   item.second = base::NumberToString(num_pending_io_);
1334   stats->push_back(item);
1335 
1336   item.first = "Max size";
1337   item.second = base::NumberToString(max_size_);
1338   stats->push_back(item);
1339 
1340   item.first = "Current size";
1341   item.second = base::NumberToString(data_->header.num_bytes);
1342   stats->push_back(item);
1343 
1344   item.first = "Cache type";
1345   item.second = "Blockfile Cache";
1346   stats->push_back(item);
1347 
1348   stats_.GetItems(stats);
1349 }
1350 
1351 void BackendImpl::OnExternalCacheHit(const std::string& key) {
1352   background_queue_.OnExternalCacheHit(key);
1353 }
1354 
1355 // ------------------------------------------------------------------------
1356 
1357 // We just created a new file so we're going to write the header and set the
1358 // file length to include the hash table (zero filled).
1359 bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
1360   AdjustMaxCacheSize(0);
1361 
1362   IndexHeader header;
1363   header.table_len = DesiredIndexTableLen(max_size_);
1364   header.create_time = Time::Now().ToInternalValue();
1365 
1366   if (!file->Write(&header, sizeof(header), 0))
1367     return false;
1368 
1369   size_t size = GetIndexSize(header.table_len);
1370   if (!file->SetLength(size))
1371     return false;
1372 
1373   // The call to SetLength() above is supposed to have already expanded the file
1374   // to |size| and zero-filled it, but on some systems the actual storage may
1375   // not get allocated until the pages are actually touched... resulting in a
1376   // SIGBUS trying to search through the index if the system is out of disk
1377   // space. So actually write out the zeroes (for pages after the one with the
1378   // header), to force allocation now and fail cleanly if there is no space.
1379   //
1380   // See https://crbug.com/1097518
1381   const int kPageSize = 4096;
1382   static_assert(sizeof(disk_cache::IndexHeader) < kPageSize,
1383                 "Code below assumes it wouldn't overwrite header by starting "
1384                 "at kPageSize");
1385   auto page = std::make_unique<char[]>(kPageSize);
1386   memset(page.get(), 0, kPageSize);
1387 
1388   for (size_t offset = kPageSize; offset < size; offset += kPageSize) {
1389     size_t end = std::min(offset + kPageSize, size);
1390     if (!file->Write(page.get(), end - offset, offset))
1391       return false;
1392   }
1393   return true;
1394 }
1395 
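// Creates the cache directory if needed and opens (or creates) the "index"
// file, mapping it into memory. |*file_created| tells the caller whether a
// brand new index had to be written via CreateBackingStore().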
1396 bool BackendImpl::InitBackingStore(bool* file_created) {
1397   if (!base::CreateDirectory(path_))
1398     return false;
1399 
1400   base::FilePath index_name = path_.AppendASCII(kIndexName);
1401 
1402   int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
1403               base::File::FLAG_OPEN_ALWAYS |
1404               base::File::FLAG_WIN_EXCLUSIVE_WRITE;
1405   base::File base_file(index_name, flags);
1406   if (!base_file.IsValid())
1407     return false;
1408 
1409   bool ret = true;
1410   *file_created = base_file.created();
1411 
1412   auto file = base::MakeRefCounted<disk_cache::File>(std::move(base_file));
1413   if (*file_created)
1414     ret = CreateBackingStore(file.get());
1415 
1416   file = nullptr;
1417   if (!ret)
1418     return false;
1419 
1420   index_ = base::MakeRefCounted<MappedFile>();
1421   data_ = static_cast<Index*>(index_->Init(index_name, 0));
1422   if (!data_) {
1423     LOG(ERROR) << "Unable to map Index file";
1424     return false;
1425   }
1426 
1427   if (index_->GetLength() < sizeof(Index)) {
1428     // We verify this again on CheckIndex() but it's easier to make sure now
1429     // that the header is there.
1430     LOG(ERROR) << "Corrupt Index file";
1431     return false;
1432   }
1433 
1434   return true;
1435 }
1436 
1437 // The maximum cache size will be either set explicitly by the caller, or
1438 // calculated by this code.
1439 void BackendImpl::AdjustMaxCacheSize(int table_len) {
1440   if (max_size_)
1441     return;
1442 
1443   // If table_len is provided, the index file exists.
1444   DCHECK(!table_len || data_->header.magic);
1445 
1446   // The user is not setting the size, let's figure it out.
1447   int64_t available = base::SysInfo::AmountOfFreeDiskSpace(path_);
1448   if (available < 0) {
1449     max_size_ = kDefaultCacheSize;
1450     return;
1451   }
1452 
1453   if (table_len)
1454     available += data_->header.num_bytes;
1455 
1456   max_size_ = PreferredCacheSize(available, GetCacheType());
1457 
1458   if (!table_len)
1459     return;
1460 
1461   // If we already have a table, adjust the size to it.
1462   max_size_ = std::min(max_size_, MaxStorageSizeForTable(table_len));
1463 }
1464 
1465 bool BackendImpl::InitStats() {
1466   Addr address(data_->header.stats);
1467   int size = stats_.StorageSize();
1468 
1469   if (!address.is_initialized()) {
1470     FileType file_type = Addr::RequiredFileType(size);
1471     DCHECK_NE(file_type, EXTERNAL);
1472     int num_blocks = Addr::RequiredBlocks(size, file_type);
1473 
1474     if (!CreateBlock(file_type, num_blocks, &address))
1475       return false;
1476 
1477     data_->header.stats = address.value();
1478     return stats_.Init(nullptr, 0, address);
1479   }
1480 
1481   if (!address.is_block_file()) {
1482     NOTREACHED();
1483     return false;
1484   }
1485 
1486   // Load the required data.
1487   size = address.num_blocks() * address.BlockSize();
1488   MappedFile* file = File(address);
1489   if (!file)
1490     return false;
1491 
1492   auto data = std::make_unique<char[]>(size);
1493   size_t offset = address.start_block() * address.BlockSize() +
1494                   kBlockHeaderSize;
1495   if (!file->Read(data.get(), size, offset))
1496     return false;
1497 
1498   if (!stats_.Init(data.get(), size, address))
1499     return false;
1500   if (GetCacheType() == net::DISK_CACHE && ShouldReportAgain())
1501     stats_.InitSizeHistogram();
1502   return true;
1503 }
1504 
1505 void BackendImpl::StoreStats() {
1506   int size = stats_.StorageSize();
1507   auto data = std::make_unique<char[]>(size);
1508   Addr address;
1509   size = stats_.SerializeStats(data.get(), size, &address);
1510   DCHECK(size);
1511   if (!address.is_initialized())
1512     return;
1513 
1514   MappedFile* file = File(address);
1515   if (!file)
1516     return;
1517 
1518   size_t offset = address.start_block() * address.BlockSize() +
1519                   kBlockHeaderSize;
1520   file->Write(data.get(), size, offset);  // ignore result.
1521 }
1522 
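// Wipes the current cache contents and re-initializes in place, carrying a
// few counters (error and doom statistics) across the restart. |failure|
// indicates we are recovering from a fatal error rather than a plain
// doom-all, which selects a different directory-cleanup path.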
1523 void BackendImpl::RestartCache(bool failure) {
1524   TRACE_EVENT0("disk_cache", "BackendImpl::RestartCache");
1525 
1526   int64_t errors = stats_.GetCounter(Stats::FATAL_ERROR);
1527   int64_t full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
1528   int64_t partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
1529   int64_t last_report = stats_.GetCounter(Stats::LAST_REPORT);
1530 
1531   PrepareForRestart();
1532   if (failure) {
1533     DCHECK(!num_refs_);
1534     DCHECK(open_entries_.empty());
1535     CleanupDirectorySync(path_);
1536   } else {
1537     DeleteCache(path_, false);
1538   }
1539 
1540   // Don't call Init() if directed by the unit test: we are simulating a failure
1541   // trying to re-enable the cache.
1542   if (unit_test_) {
1543     init_ = true;  // Let the destructor do proper cleanup.
1544   } else if (SyncInit() == net::OK) {
1545     stats_.SetCounter(Stats::FATAL_ERROR, errors);
1546     stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
1547     stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
1548     stats_.SetCounter(Stats::LAST_REPORT, last_report);
1549   }
1550 }
1551 
1552 void BackendImpl::PrepareForRestart() {
1553   // Reset the mask_ if it was not given by the user.
1554   if (!(user_flags_ & kMask))
1555     mask_ = 0;
1556 
1557   if (!(user_flags_ & kNewEviction))
1558     new_eviction_ = false;
1559 
1560   disabled_ = true;
1561   data_->header.crash = 0;
1562   index_->Flush();
1563   index_ = nullptr;
1564   data_ = nullptr;
1565   block_files_.CloseFiles();
1566   rankings_.Reset();
1567   init_ = false;
1568   restarted_ = true;
1569 }
1570 
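// Returns (via |entry|) the in-memory EntryImpl for |address|, loading and
// sanity-checking it from disk if it is not already open. Returns 0 on
// success or one of the ERR_* codes from errors.h.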
1571 int BackendImpl::NewEntry(Addr address, scoped_refptr<EntryImpl>* entry) {
1572   auto it = open_entries_.find(address.value());
1573   if (it != open_entries_.end()) {
1574     // Easy job. This entry is already in memory.
1575     *entry = base::WrapRefCounted(it->second);
1576     return 0;
1577   }
1578 
1579   STRESS_DCHECK(block_files_.IsValid(address));
1580 
1581   if (!address.SanityCheckForEntry()) {
1582     LOG(WARNING) << "Wrong entry address.";
1583     STRESS_NOTREACHED();
1584     return ERR_INVALID_ADDRESS;
1585   }
1586 
1587   auto cache_entry = base::MakeRefCounted<EntryImpl>(this, address, read_only_);
1588   IncreaseNumRefs();
1589   *entry = nullptr;
1590 
1591   if (!cache_entry->entry()->Load())
1592     return ERR_READ_FAILURE;
1593 
1594   if (!cache_entry->SanityCheck()) {
1595     LOG(WARNING) << "Messed up entry found.";
1596     STRESS_NOTREACHED();
1597     return ERR_INVALID_ENTRY;
1598   }
1599 
1600   STRESS_DCHECK(block_files_.IsValid(
1601                     Addr(cache_entry->entry()->Data()->rankings_node)));
1602 
1603   if (!cache_entry->LoadNodeAddress())
1604     return ERR_READ_FAILURE;
1605 
1606   if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
1607     STRESS_NOTREACHED();
1608     cache_entry->SetDirtyFlag(0);
1609     // Don't remove this from the list (it is not linked properly). Instead,
1610     // break the link back to the entry because it is going away, and leave the
1611     // rankings node to be deleted if we find it through a list.
1612     rankings_.SetContents(cache_entry->rankings(), 0);
1613   } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
1614     STRESS_NOTREACHED();
1615     cache_entry->SetDirtyFlag(0);
1616     rankings_.SetContents(cache_entry->rankings(), address.value());
1617   }
1618 
1619   if (!cache_entry->DataSanityCheck()) {
1620     LOG(WARNING) << "Messed up entry found.";
1621     cache_entry->SetDirtyFlag(0);
1622     cache_entry->FixForDelete();
1623   }
1624 
1625   // Prevent overwriting the dirty flag in the destructor.
1626   cache_entry->SetDirtyFlag(GetCurrentEntryId());
1627 
1628   open_entries_[address.value()] = cache_entry.get();
1629 
1630   cache_entry->BeginLogging(net_log_, false);
1631   *entry = std::move(cache_entry);
1632   return 0;
1633 }
1634 
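// Walks the hash-bucket chain for |hash| looking for |key|. Returns the
// matching entry, or its predecessor in the chain when |find_parent| is true,
// unlinking any dirty or unreadable entries found along the way.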
1635 scoped_refptr<EntryImpl> BackendImpl::MatchEntry(const std::string& key,
1636                                                  uint32_t hash,
1637                                                  bool find_parent,
1638                                                  Addr entry_addr,
1639                                                  bool* match_error) {
1640   TRACE_EVENT0("disk_cache", "BackendImpl::MatchEntry");
1641 
1642   Addr address(data_->table[hash & mask_]);
1643   scoped_refptr<EntryImpl> cache_entry, parent_entry;
1644   bool found = false;
1645   std::set<CacheAddr> visited;
1646   *match_error = false;
1647 
1648   for (;;) {
1649     if (disabled_)
1650       break;
1651 
1652     if (visited.find(address.value()) != visited.end()) {
1653       // It's possible for a buggy version of the code to write a loop. Just
1654       // break it.
1655       address.set_value(0);
1656       parent_entry->SetNextAddress(address);
1657     }
1658     visited.insert(address.value());
1659 
1660     if (!address.is_initialized()) {
1661       if (find_parent)
1662         found = true;
1663       break;
1664     }
1665 
1666     int error = NewEntry(address, &cache_entry);
1667     if (error || cache_entry->dirty()) {
1668       // This entry is dirty on disk (it was not properly closed): we cannot
1669       // trust it.
1670       Addr child(0);
1671       if (!error)
1672         child.set_value(cache_entry->GetNextAddress());
1673 
1674       if (parent_entry.get()) {
1675         parent_entry->SetNextAddress(child);
1676         parent_entry = nullptr;
1677       } else {
1678         data_->table[hash & mask_] = child.value();
1679       }
1680 
1681       if (!error) {
1682         // It is important to call DestroyInvalidEntry after removing this
1683         // entry from the table.
1684         DestroyInvalidEntry(cache_entry.get());
1685         cache_entry = nullptr;
1686       }
1687 
1688       // Restart the search.
1689       address.set_value(data_->table[hash & mask_]);
1690       visited.clear();
1691       continue;
1692     }
1693 
1694     DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
1695     if (cache_entry->IsSameEntry(key, hash)) {
1696       if (!cache_entry->Update())
1697         cache_entry = nullptr;
1698       found = true;
1699       if (find_parent && entry_addr.value() != address.value()) {
1700         *match_error = true;
1701         parent_entry = nullptr;
1702       }
1703       break;
1704     }
1705     if (!cache_entry->Update())
1706       cache_entry = nullptr;
1707     parent_entry = cache_entry;
1708     cache_entry = nullptr;
1709     if (!parent_entry.get())
1710       break;
1711 
1712     address.set_value(parent_entry->GetNextAddress());
1713   }
1714 
1715   if (parent_entry.get() && (!find_parent || !found))
1716     parent_entry = nullptr;
1717 
1718   if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
1719     *match_error = true;
1720     parent_entry = nullptr;
1721   }
1722 
1723   if (cache_entry.get() && (find_parent || !found))
1724     cache_entry = nullptr;
1725 
1726   FlushIndex();
1727 
1728   return find_parent ? std::move(parent_entry) : std::move(cache_entry);
1729 }
1730 
1731 bool BackendImpl::OpenFollowingEntryFromList(
1732     Rankings::List list,
1733     CacheRankingsBlock** from_entry,
1734     scoped_refptr<EntryImpl>* next_entry) {
1735   if (disabled_)
1736     return false;
1737 
1738   if (!new_eviction_ && Rankings::NO_USE != list)
1739     return false;
1740 
1741   Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
1742   CacheRankingsBlock* next_block = rankings_.GetNext(rankings.get(), list);
1743   Rankings::ScopedRankingsBlock next(&rankings_, next_block);
1744   *from_entry = nullptr;
1745 
1746   *next_entry = GetEnumeratedEntry(next.get(), list);
1747   if (!*next_entry)
1748     return false;
1749 
1750   *from_entry = next.release();
1751   return true;
1752 }
1753 
1754 scoped_refptr<EntryImpl> BackendImpl::GetEnumeratedEntry(
1755     CacheRankingsBlock* next,
1756     Rankings::List list) {
1757   if (!next || disabled_)
1758     return nullptr;
1759 
1760   scoped_refptr<EntryImpl> entry;
1761   int rv = NewEntry(Addr(next->Data()->contents), &entry);
1762   if (rv) {
1763     STRESS_NOTREACHED();
1764     rankings_.Remove(next, list, false);
1765     if (rv == ERR_INVALID_ADDRESS) {
1766       // There is nothing linked from the index. Delete the rankings node.
1767       DeleteBlock(next->address(), true);
1768     }
1769     return nullptr;
1770   }
1771 
1772   if (entry->dirty()) {
1773     // We cannot trust this entry.
1774     InternalDoomEntry(entry.get());
1775     return nullptr;
1776   }
1777 
1778   if (!entry->Update()) {
1779     STRESS_NOTREACHED();
1780     return nullptr;
1781   }
1782 
1783   // Note that it is unfortunate (but possible) for this entry to be clean, but
1784   // not actually the real entry. In other words, we could have lost this entry
1785   // from the index, and it could have been replaced with a newer one. It's not
1786   // worth checking that this entry is "the real one", so we just return it and
1787   // let the enumeration continue; this entry will be evicted at some point, and
1788   // the regular path will work with the real entry. With time, this problem
1789   // will disappear because this scenario is just a bug.
1790 
1791   // Make sure that we save the key for later.
1792   entry->GetKey();
1793 
1794   return entry;
1795 }
1796 
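// Called when a create operation finds a previously doomed entry: an entry in
// a deleted state can be brought back to life, while a normal state means the
// create simply missed.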
1797 scoped_refptr<EntryImpl> BackendImpl::ResurrectEntry(
1798     scoped_refptr<EntryImpl> deleted_entry) {
1799   if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
1800     deleted_entry = nullptr;
1801     stats_.OnEvent(Stats::CREATE_MISS);
1802     return nullptr;
1803   }
1804 
1805   // We are attempting to create an entry and found out that the entry was
1806   // previously deleted.
1807 
1808   eviction_.OnCreateEntry(deleted_entry.get());
1809   entry_count_++;
1810 
1811   stats_.OnEvent(Stats::RESURRECT_HIT);
1812   return deleted_entry;
1813 }
1814 
1815 void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
1816   LOG(WARNING) << "Destroying invalid entry.";
1817 
1818   entry->SetPointerForInvalidEntry(GetCurrentEntryId());
1819 
1820   eviction_.OnDoomEntry(entry);
1821   entry->InternalDoom();
1822 
1823   if (!new_eviction_)
1824     DecreaseNumEntries();
1825   stats_.OnEvent(Stats::INVALID_ENTRY);
1826 }
1827 
1828 void BackendImpl::AddStorageSize(int32_t bytes) {
1829   data_->header.num_bytes += bytes;
1830   DCHECK_GE(data_->header.num_bytes, 0);
1831 }
1832 
1833 void BackendImpl::SubstractStorageSize(int32_t bytes) {
1834   data_->header.num_bytes -= bytes;
1835   DCHECK_GE(data_->header.num_bytes, 0);
1836 }
1837 
1838 void BackendImpl::IncreaseNumRefs() {
1839   num_refs_++;
1840   if (max_refs_ < num_refs_)
1841     max_refs_ = num_refs_;
1842 }
1843 
1844 void BackendImpl::DecreaseNumRefs() {
1845   DCHECK(num_refs_);
1846   num_refs_--;
1847 
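  // If the backend was disabled while entries were still open, restart it now
  // that the last reference is gone.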
1848   if (!num_refs_ && disabled_)
1849     base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
1850         FROM_HERE,
1851         base::BindOnce(&BackendImpl::RestartCache, GetWeakPtr(), true));
1852 }
1853 
1854 void BackendImpl::IncreaseNumEntries() {
1855   data_->header.num_entries++;
1856   DCHECK_GT(data_->header.num_entries, 0);
1857 }
1858 
1859 void BackendImpl::DecreaseNumEntries() {
1860   data_->header.num_entries--;
1861   if (data_->header.num_entries < 0) {
1862     NOTREACHED();
1863     data_->header.num_entries = 0;
1864   }
1865 }
1866 
1867 void BackendImpl::LogStats() {
1868   StatsItems stats;
1869   GetStats(&stats);
1870 
1871   for (const auto& stat : stats)
1872     VLOG(1) << stat.first << ": " << stat.second;
1873 }
1874 
1875 void BackendImpl::ReportStats() {
1876   CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);
1877 
1878   int64_t current_size = data_->header.num_bytes / (1024 * 1024);
1879   int max_size = max_size_ / (1024 * 1024);
1880   int hit_ratio_as_percentage = stats_.GetHitRatio();
1881 
1882   CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
1883   // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
1884   // ratio of that bin's total count to the count in the same bin in the Size2
1885   // histogram.
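  // (Illustrative example: if caches of a given size average a 40% hit ratio,
  // roughly 40 out of every 100 reports from those caches also land in
  // HitRatioBySize2, so that bin's count divided by the Size2 count is ~0.4.)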
1886   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
1887     CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
1888   CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
1889   if (!max_size)
1890     max_size++;
1891   CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);
1892 
1893   CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
1894             static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
1895   CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
1896             static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
1897   stats_.SetCounter(Stats::MAX_ENTRIES, 0);
1898 
1899   CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
1900             static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
1901   CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
1902             static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
1903   CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
1904             static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
1905   stats_.SetCounter(Stats::FATAL_ERROR, 0);
1906   stats_.SetCounter(Stats::DOOM_CACHE, 0);
1907   stats_.SetCounter(Stats::DOOM_RECENT, 0);
1908 
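  // The stats timer ticks every 30 seconds, so 120 ticks correspond to one
  // hour of runtime.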
1909   int64_t total_hours = stats_.GetCounter(Stats::TIMER) / 120;
1910   if (!data_->header.create_time || !data_->header.lru.filled) {
1911     int cause = data_->header.create_time ? 0 : 1;
1912     if (!data_->header.lru.filled)
1913       cause |= 2;
1914     CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
1915     CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
1916     return;
1917   }
1918 
1919   // This is an up-to-date client that will report FirstEviction() data. After
1920   // that event, we start reporting the histograms below:
1921 
1922   CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
1923   // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
1924   // time is the ratio of that bin's total count to the count in the same bin in
1925   // the TotalTime histogram.
1926   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
1927     CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, static_cast<int>(total_hours));
1928 
1929   int64_t use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
1930   stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
1931 
1932   // We may see users with no use_hours at this point if this is the first time
1933   // we are running this code.
1934   if (use_hours)
1935     use_hours = total_hours - use_hours;
1936 
1937   if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
1938     return;
1939 
1940   CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
1941   // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
1942   // is the ratio of that bin's total count to the count in the same bin in the
1943   // UseTime histogram.
1944   if (base::RandInt(0, 99) < hit_ratio_as_percentage)
1945     CACHE_UMA(HOURS, "HitRatioByUseTime", 0, static_cast<int>(use_hours));
1946   CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);
1947 
1948   int64_t trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
1949   CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
1950 
1951   int64_t avg_size = data_->header.num_bytes / GetEntryCount();
1952   CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
1953   CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);
1954 
1955   CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
1956             data_->header.num_entries * 100 / (mask_ + 1));
1957 
1958   int large_entries_bytes = stats_.GetLargeEntriesSize();
1959   int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
1960   CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
1961 
1962   if (new_eviction_) {
1963     CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
1964     CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
1965               data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
1966     CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
1967               data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
1968     CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
1969               data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
1970     CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
1971               data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
1972   }
1973 
1974   stats_.ResetRatios();
1975   stats_.SetCounter(Stats::TRIM_ENTRY, 0);
1976 }
1977 
1978 void BackendImpl::UpgradeTo2_1() {
1979   // 2.1 is basically the same as 2.0, except that new fields are actually
1980   // updated by the new eviction algorithm.
1981   DCHECK_EQ(kVersion2_0, data_->header.version);
1982   data_->header.version = kVersion2_1;
1983   data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
1984 }
1985 
1986 void BackendImpl::UpgradeTo3_0() {
1987   // 3.0 uses a 64-bit size field.
1988   DCHECK(kVersion2_0 == data_->header.version ||
1989          kVersion2_1 == data_->header.version);
1990   data_->header.version = kVersion3_0;
1991   data_->header.num_bytes = data_->header.old_v2_num_bytes;
1992 }
1993 
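// Validates the index header (magic, version, table size, byte and entry
// counts), upgrading old 2.x headers in place, and preloads the index table.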
1994 bool BackendImpl::CheckIndex() {
1995   DCHECK(data_);
1996 
1997   size_t current_size = index_->GetLength();
1998   if (current_size < sizeof(Index)) {
1999     LOG(ERROR) << "Corrupt Index file";
2000     return false;
2001   }
2002 
2003   if (data_->header.magic != kIndexMagic) {
2004     LOG(ERROR) << "Invalid file magic";
2005     return false;
2006   }
2007 
2008   // 2.0 + new_eviction needs conversion to 2.1.
2009   if (data_->header.version == kVersion2_0 && new_eviction_) {
2010     UpgradeTo2_1();
2011   }
2012 
2013   // 2.0 or 2.1 can be upgraded to 3.0.
2014   if (data_->header.version == kVersion2_0 ||
2015       data_->header.version == kVersion2_1) {
2016     UpgradeTo3_0();
2017   }
2018 
2019   if (kCurrentVersion != data_->header.version) {
2020     LOG(ERROR) << "Invalid file version";
2021     return false;
2022   }
2023 
2024   if (!data_->header.table_len) {
2025     LOG(ERROR) << "Invalid table size";
2026     return false;
2027   }
2028 
2029   if (current_size < GetIndexSize(data_->header.table_len) ||
2030       data_->header.table_len & (kBaseTableLen - 1)) {
2031     LOG(ERROR) << "Corrupt Index file";
2032     return false;
2033   }
2034 
2035   AdjustMaxCacheSize(data_->header.table_len);
2036 
2037 #if !defined(NET_BUILD_STRESS_CACHE)
2038   if (data_->header.num_bytes < 0 ||
2039       (max_size_ < std::numeric_limits<int32_t>::max() - kDefaultCacheSize &&
2040        data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
2041     LOG(ERROR) << "Invalid cache (current) size";
2042     return false;
2043   }
2044 #endif
2045 
2046   if (data_->header.num_entries < 0) {
2047     LOG(ERROR) << "Invalid number of entries";
2048     return false;
2049   }
2050 
2051   if (!mask_)
2052     mask_ = data_->header.table_len - 1;
2053 
2054   // Load the table into memory.
2055   return index_->Preload();
2056 }
2057 
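// Walks every hash chain in the table verifying each entry. Returns the
// number of dirty entries found, or an ERR_* code on failure.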
2058 int BackendImpl::CheckAllEntries() {
2059   int num_dirty = 0;
2060   int num_entries = 0;
2061   DCHECK(mask_ < std::numeric_limits<uint32_t>::max());
2062   for (unsigned int i = 0; i <= mask_; i++) {
2063     Addr address(data_->table[i]);
2064     if (!address.is_initialized())
2065       continue;
2066     for (;;) {
2067       scoped_refptr<EntryImpl> cache_entry;
2068       int ret = NewEntry(address, &cache_entry);
2069       if (ret) {
2070         STRESS_NOTREACHED();
2071         return ret;
2072       }
2073 
2074       if (cache_entry->dirty())
2075         num_dirty++;
2076       else if (CheckEntry(cache_entry.get()))
2077         num_entries++;
2078       else
2079         return ERR_INVALID_ENTRY;
2080 
2081       DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
2082       address.set_value(cache_entry->GetNextAddress());
2083       if (!address.is_initialized())
2084         break;
2085     }
2086   }
2087 
2088   if (num_entries + num_dirty != data_->header.num_entries) {
2089     LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
2090                   " " << data_->header.num_entries;
2091     DCHECK_LT(num_entries, data_->header.num_entries);
2092     return ERR_NUM_ENTRIES_MISMATCH;
2093   }
2094 
2095   return num_dirty;
2096 }
2097 
2098 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
2099   bool ok = block_files_.IsValid(cache_entry->entry()->address());
2100   ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
2101   EntryStore* data = cache_entry->entry()->Data();
2102   for (size_t i = 0; i < std::size(data->data_addr); i++) {
2103     if (data->data_addr[i]) {
2104       Addr address(data->data_addr[i]);
2105       if (address.is_block_file())
2106         ok = ok && block_files_.IsValid(address);
2107     }
2108   }
2109 
2110   return ok && cache_entry->rankings()->VerifyHash();
2111 }
2112 
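// Caps the memory used for user buffers at 2% of physical RAM, never more
// than 30 MB (e.g. 2% of 8 GB is ~164 MB, which still gets clamped to 30 MB).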
2113 int BackendImpl::MaxBuffersSize() {
2114   static uint64_t total_memory = base::SysInfo::AmountOfPhysicalMemory();
2115   static bool done = false;
2116 
2117   if (!done) {
2118     done = true;
2119 
2120     // We want to use up to 2% of the computer's memory, limit 30 MB.
2121     total_memory = total_memory * 2 / 100;
2122     constexpr uint64_t kMaxBuffersSize = 30 * 1024 * 1024;
2123     if (total_memory > kMaxBuffersSize || total_memory == 0)
2124       total_memory = kMaxBuffersSize;
2125   }
2126 
2127   return static_cast<int>(total_memory);
2128 }
2129 
2130 void BackendImpl::FlushForTesting() {
2131   if (!g_internal_cache_thread.IsCreated()) {
2132     return;
2133   }
2134 
2135   g_internal_cache_thread.Get().FlushForTesting();
2136 }
2137 
2138 void BackendImpl::FlushAsynchronouslyForTesting(base::OnceClosure callback) {
2139   if (!g_internal_cache_thread.IsCreated()) {
2140     base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
2141         FROM_HERE, std::move(callback));
2142     return;
2143   }
2144 
2145   InternalCacheThread()->PostTaskAndReply(FROM_HERE, base::BindOnce([]() {}),
2146                                           std::move(callback));
2147 }
2148 
2149 }  // namespace disk_cache
2150 
2151 #undef CACHE_UMA_BACKEND_IMPL_OBJ  // undef for jumbo builds
2152