// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_index.h"

#include <algorithm>
#include <limits>
#include <string>
#include <utility>

#include "base/check_op.h"
#include "base/files/file_util.h"
#include "base/functional/bind.h"
#include "base/not_fatal_until.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_tokenizer.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/task_runner.h"
#include "base/time/time.h"
#include "base/trace_event/memory_usage_estimator.h"
#include "build/build_config.h"
#include "net/base/features.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/memory_entry_data_hints.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index_delegate.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"

#if BUILDFLAG(IS_POSIX)
#include <sys/stat.h>
#include <sys/time.h>
#endif

namespace {

// How many milliseconds to delay writing the index to disk after the last
// cache operation.
const int kWriteToDiskDelayMSecs = 20000;
const int kWriteToDiskOnBackgroundDelayMSecs = 100;
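// The background delay is much shorter, presumably so that the index is
// persisted promptly while a backgrounded app may be killed at any time.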

// Divides the cache space into this many parts; eviction starts once less
// than one part of the space remains free.
const uint32_t kEvictionMarginDivisor = 20;

const uint32_t kBytesInKb = 1024;

// This is added to the size of each entry before using the size
// to determine which entries to evict first. It's basically an
// estimate of the filesystem overhead, but it also serves to flatten
// the curve so that 1-byte entries and 2-byte entries are basically
// treated the same.
static const int kEstimatedEntryOverhead = 512;

}  // namespace

namespace disk_cache {

EntryMetadata::EntryMetadata()
    : last_used_time_seconds_since_epoch_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {}

EntryMetadata::EntryMetadata(base::Time last_used_time,
                             base::StrictNumeric<uint32_t> entry_size)
    : last_used_time_seconds_since_epoch_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {
  SetEntrySize(entry_size);  // to round/pack properly.
  SetLastUsedTime(last_used_time);
}

EntryMetadata::EntryMetadata(int32_t trailer_prefetch_size,
                             base::StrictNumeric<uint32_t> entry_size)
    : trailer_prefetch_size_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {
  SetEntrySize(entry_size);  // to round/pack properly.
  SetTrailerPrefetchSize(trailer_prefetch_size);
}

base::Time EntryMetadata::GetLastUsedTime() const {
  // Preserve nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    return base::Time();

  return base::Time::UnixEpoch() +
         base::Seconds(last_used_time_seconds_since_epoch_);
}

void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) {
  // Preserve nullity.
  if (last_used_time.is_null()) {
    last_used_time_seconds_since_epoch_ = 0;
    return;
  }

  last_used_time_seconds_since_epoch_ = base::saturated_cast<uint32_t>(
      (last_used_time - base::Time::UnixEpoch()).InSeconds());
  // Avoid accidental nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    last_used_time_seconds_since_epoch_ = 1;
}

int32_t EntryMetadata::GetTrailerPrefetchSize() const {
  return trailer_prefetch_size_;
}

void EntryMetadata::SetTrailerPrefetchSize(int32_t size) {
  if (size <= 0)
    return;
  trailer_prefetch_size_ = size;
}

uint32_t EntryMetadata::GetEntrySize() const {
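  // Sizes are stored as a count of 256-byte chunks; shifting left by 8
  // converts the stored chunk count back to bytes.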
  return entry_size_256b_chunks_ << 8;
}

void EntryMetadata::SetEntrySize(base::StrictNumeric<uint32_t> entry_size) {
  // This should not overflow since we limit entries to 1/8th of the cache.
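  // Adding 255 before shifting rounds the size up to the next 256-byte
  // chunk, so GetEntrySize() never reports less than the true size.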
  entry_size_256b_chunks_ = (static_cast<uint32_t>(entry_size) + 255) >> 8;
}

void EntryMetadata::Serialize(net::CacheType cache_type,
                              base::Pickle* pickle) const {
  DCHECK(pickle);
  // If you modify the size of the pickle, be sure to update
  // kOnDiskSizeBytes.
  uint32_t packed_entry_info = (entry_size_256b_chunks_ << 8) | in_memory_data_;
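  // APP_CACHE mode does not track access times, so the slot that otherwise
  // holds the last-used time is reused for the trailer prefetch size.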
  if (cache_type == net::APP_CACHE) {
    pickle->WriteInt64(trailer_prefetch_size_);
  } else {
    int64_t internal_last_used_time = GetLastUsedTime().ToInternalValue();
    pickle->WriteInt64(internal_last_used_time);
  }
  pickle->WriteUInt64(packed_entry_info);
}

bool EntryMetadata::Deserialize(net::CacheType cache_type,
                                base::PickleIterator* it,
                                bool has_entry_in_memory_data,
                                bool app_cache_has_trailer_prefetch_size) {
  DCHECK(it);
  int64_t tmp_time_or_prefetch_size;
  uint64_t tmp_entry_size;
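  // An entry size that does not fit in 32 bits cannot have been written by
  // Serialize(), so treat such a record as corrupt.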
  if (!it->ReadInt64(&tmp_time_or_prefetch_size) ||
      !it->ReadUInt64(&tmp_entry_size) ||
      tmp_entry_size > std::numeric_limits<uint32_t>::max())
    return false;
  if (cache_type == net::APP_CACHE) {
    if (app_cache_has_trailer_prefetch_size) {
      int32_t trailer_prefetch_size = 0;
      base::CheckedNumeric<int32_t> numeric_size(tmp_time_or_prefetch_size);
      if (numeric_size.AssignIfValid(&trailer_prefetch_size)) {
        SetTrailerPrefetchSize(trailer_prefetch_size);
      }
    }
  } else {
    SetLastUsedTime(base::Time::FromInternalValue(tmp_time_or_prefetch_size));
  }
  if (has_entry_in_memory_data) {
    // tmp_entry_size actually packs entry_size_256b_chunks_ and
    // in_memory_data_.
    SetEntrySize(static_cast<uint32_t>(tmp_entry_size & 0xFFFFFF00));
    SetInMemoryData(static_cast<uint8_t>(tmp_entry_size & 0xFF));
  } else {
    SetEntrySize(static_cast<uint32_t>(tmp_entry_size));
    SetInMemoryData(0);
  }
  return true;
}

SimpleIndex::SimpleIndex(
    const scoped_refptr<base::SequencedTaskRunner>& task_runner,
    scoped_refptr<BackendCleanupTracker> cleanup_tracker,
    SimpleIndexDelegate* delegate,
    net::CacheType cache_type,
    std::unique_ptr<SimpleIndexFile> index_file)
    : cleanup_tracker_(std::move(cleanup_tracker)),
      delegate_(delegate),
      cache_type_(cache_type),
      index_file_(std::move(index_file)),
      task_runner_(task_runner),
      prioritized_caching_enabled_(base::FeatureList::IsEnabled(
          net::features::kSimpleCachePrioritizedCaching)),
      caching_prioritization_factor_(
          net::features::kSimpleCachePrioritizedCachingPrioritizationFactor
              .Get()),
      caching_prioritization_period_in_seconds_(static_cast<uint64_t>(
          net::features::kSimpleCachePrioritizedCachingPrioritizationPeriod
              .Get()
              .InSeconds())) {
  // Creating the callback once so it is reused every time
  // write_to_disk_timer_.Start() is called.
  write_to_disk_cb_ = base::BindRepeating(&SimpleIndex::WriteToDisk,
                                          weak_ptr_factory_.GetWeakPtr(),
                                          INDEX_WRITE_REASON_IDLE);
}

SimpleIndex::~SimpleIndex() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // Fail all callbacks waiting for the index to come up.
  for (auto& callback : to_run_when_initialized_) {
    std::move(callback).Run(net::ERR_ABORTED);
  }
}

void SimpleIndex::Initialize(base::Time cache_mtime) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

#if BUILDFLAG(IS_ANDROID)
  if (app_status_listener_getter_) {
    base::android::ApplicationStatusListener* listener =
        app_status_listener_getter_.Run();
    if (listener) {
      listener->SetCallback(
          base::BindRepeating(&SimpleIndex::OnApplicationStateChange,
                              weak_ptr_factory_.GetWeakPtr()));
    }
    // Not using the fallback on purpose here --- if the getter is set, we may
    // be in a process where the base::android::ApplicationStatusListener::New
    // impl is unavailable.
    // (See https://crbug.com/881572)
  } else if (base::android::IsVMInitialized()) {
    owned_app_status_listener_ = base::android::ApplicationStatusListener::New(
        base::BindRepeating(&SimpleIndex::OnApplicationStateChange,
                            weak_ptr_factory_.GetWeakPtr()));
  }
#endif

  auto load_result = std::make_unique<SimpleIndexLoadResult>();
  auto* load_result_ptr = load_result.get();
  index_file_->LoadIndexEntries(
      cache_mtime,
      base::BindOnce(&SimpleIndex::MergeInitializingSet,
                     weak_ptr_factory_.GetWeakPtr(), std::move(load_result)),
      load_result_ptr);
}

void SimpleIndex::SetMaxSize(uint64_t max_bytes) {
  // Zero size means use the default.
  if (max_bytes) {
    max_size_ = max_bytes;
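    // With kEvictionMarginDivisor == 20, eviction starts once the cache
    // grows past 95% of |max_size_| and trims it back down to 90%.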
    high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor;
    low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor);
  }
}

void SimpleIndex::ExecuteWhenReady(net::CompletionOnceCallback task) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (initialized_)
    task_runner_->PostTask(FROM_HERE, base::BindOnce(std::move(task), net::OK));
  else
    to_run_when_initialized_.push_back(std::move(task));
}

std::unique_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
    base::Time initial_time,
    base::Time end_time) {
  DCHECK_EQ(true, initialized_);

  // The net::APP_CACHE mode does not track access times.  Assert that external
  // consumers are not relying on access time ranges.
  DCHECK(cache_type_ != net::APP_CACHE ||
         (initial_time.is_null() && end_time.is_null()));

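  // Last-used times are stored with one-second granularity, so widen the
  // requested range by the comparison epsilons to avoid missing entries at
  // the boundaries.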
  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();
  DCHECK(end_time >= initial_time);

  auto ret_hashes = std::make_unique<HashList>();
  for (const auto& entry : entries_set_) {
    const EntryMetadata& metadata = entry.second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < end_time)
      ret_hashes->push_back(entry.first);
  }
  return ret_hashes;
}

std::unique_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
  return GetEntriesBetween(base::Time(), base::Time());
}

int32_t SimpleIndex::GetEntryCount() const {
  // TODO(pasko): return a meaningful initial estimate before initialized.
  return entries_set_.size();
}

uint64_t SimpleIndex::GetCacheSize() const {
  DCHECK(initialized_);
  return cache_size_;
}

uint64_t SimpleIndex::GetCacheSizeBetween(base::Time initial_time,
                                          base::Time end_time) const {
  DCHECK_EQ(true, initialized_);

  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();

  DCHECK(end_time >= initial_time);
  uint64_t size = 0;
  for (const auto& entry : entries_set_) {
    const EntryMetadata& metadata = entry.second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < end_time)
      size += metadata.GetEntrySize();
  }
  return size;
}

base::Time SimpleIndex::GetLastUsedTime(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_NE(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return base::Time();
  return it->second.GetLastUsedTime();
}

void SimpleIndex::SetLastUsedTimeForTest(uint64_t entry_hash,
                                         const base::Time last_used) {
  auto it = entries_set_.find(entry_hash);
  CHECK(it != entries_set_.end(), base::NotFatalUntil::M130);
  it->second.SetLastUsedTime(last_used);
}

bool SimpleIndex::HasPendingWrite() const {
  return write_to_disk_timer_.IsRunning();
}

void SimpleIndex::Insert(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Upon insert we don't know yet the size of the entry.
  // It will be updated later when the SimpleEntryImpl finishes opening or
  // creating the new entry, and then UpdateEntrySize will be called.
  bool inserted = false;
  if (cache_type_ == net::APP_CACHE) {
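    // SetTrailerPrefetchSize() ignores non-positive values, so passing -1
    // here leaves the prefetch size unset until it is known.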
    inserted =
        InsertInEntrySet(entry_hash, EntryMetadata(-1, 0u), &entries_set_);
  } else {
    inserted = InsertInEntrySet(
        entry_hash, EntryMetadata(base::Time::Now(), 0u), &entries_set_);
  }
  if (!initialized_)
    removed_entries_.erase(entry_hash);
  if (inserted)
    PostponeWritingToDisk();
}

void SimpleIndex::Remove(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  bool need_write = false;
  auto it = entries_set_.find(entry_hash);
  if (it != entries_set_.end()) {
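    // Shrink the size to zero first so the |cache_size_| accounting stays
    // correct before the entry is erased.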
    UpdateEntryIteratorSize(&it, 0u);
    entries_set_.erase(it);
    need_write = true;
  }

  if (!initialized_)
    removed_entries_.insert(entry_hash);

  if (need_write)
    PostponeWritingToDisk();
}

bool SimpleIndex::Has(uint64_t hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // If not initialized, always return true, forcing it to go to the disk.
  return !initialized_ || entries_set_.count(hash) > 0;
}

uint8_t SimpleIndex::GetEntryInMemoryData(uint64_t entry_hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return 0;
  return it->second.GetInMemoryData();
}

void SimpleIndex::SetEntryInMemoryData(uint64_t entry_hash, uint8_t value) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return;
  return it->second.SetInMemoryData(value);
}

bool SimpleIndex::UseIfExists(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Always update the last used time, even if it is during initialization.
  // It will be merged later.
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    // If not initialized, always return true, forcing it to go to the disk.
    return !initialized_;
  // We do not need to track access times in APP_CACHE mode.
  if (cache_type_ == net::APP_CACHE)
    return true;
  it->second.SetLastUsedTime(base::Time::Now());
  PostponeWritingToDisk();
  return true;
}

void SimpleIndex::StartEvictionIfNeeded() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (eviction_in_progress_ || cache_size_ <= high_watermark_)
    return;
  // Take all live key hashes from the index and sort them by time.
  eviction_in_progress_ = true;
  eviction_start_time_ = base::TimeTicks::Now();

  const bool use_size_heuristic =
      (cache_type_ != net::GENERATED_BYTE_CODE_CACHE &&
       cache_type_ != net::GENERATED_WEBUI_BYTE_CODE_CACHE);

  // Flatten for sorting.
  std::vector<std::pair<uint64_t, const EntrySet::value_type*>> entries;
  entries.reserve(entries_set_.size());
  uint32_t now = (base::Time::Now() - base::Time::UnixEpoch()).InSeconds();
  for (EntrySet::const_iterator i = entries_set_.begin();
       i != entries_set_.end(); ++i) {
    const uint64_t time_since_last_used = now - i->second.RawTimeForSorting();
    uint64_t sort_value = time_since_last_used;
    // See crbug.com/736437 for context.
    //
    // Will not overflow since we're multiplying two 32-bit values and storing
    // them in a 64-bit variable.
    if (use_size_heuristic) {
      sort_value *= i->second.GetEntrySize() + kEstimatedEntryOverhead;
      // When prioritized caching is enabled, we want to evict entries that are
      // not prioritized before entries that are prioritized. So we divide the
      // sort value by the `caching_prioritization_factor`.
      if (prioritized_caching_enabled_ &&
          time_since_last_used < caching_prioritization_period_in_seconds_ &&
          (i->second.GetInMemoryData() & HINT_HIGH_PRIORITY) ==
              HINT_HIGH_PRIORITY) {
        sort_value /= caching_prioritization_factor_;
      }
    }
    // Subtract so we don't need a custom comparator.
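    // std::sort() is ascending, so storing (max - sort_value) puts the
    // entries with the largest sort values (the best eviction candidates)
    // first.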
    entries.emplace_back(std::numeric_limits<uint64_t>::max() - sort_value,
                         &*i);
  }

  uint64_t evicted_so_far_size = 0;
  const uint64_t amount_to_evict = cache_size_ - low_watermark_;
  std::vector<uint64_t> entry_hashes;
  std::sort(entries.begin(), entries.end());
  for (const auto& score_metadata_pair : entries) {
    if (evicted_so_far_size >= amount_to_evict)
      break;
    evicted_so_far_size += score_metadata_pair.second->second.GetEntrySize();
    entry_hashes.push_back(score_metadata_pair.second->first);
  }

  SIMPLE_CACHE_UMA(COUNTS_1M,
                   "Eviction.EntryCount", cache_type_, entry_hashes.size());
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToSelectEntries", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);

  delegate_->DoomEntries(&entry_hashes,
                         base::BindOnce(&SimpleIndex::EvictionDone,
                                        weak_ptr_factory_.GetWeakPtr()));
}

int32_t SimpleIndex::GetTrailerPrefetchSize(uint64_t entry_hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return -1;
  return it->second.GetTrailerPrefetchSize();
}

void SimpleIndex::SetTrailerPrefetchSize(uint64_t entry_hash, int32_t size) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return;
  int32_t original_size = it->second.GetTrailerPrefetchSize();
  it->second.SetTrailerPrefetchSize(size);
  if (original_size != it->second.GetTrailerPrefetchSize())
    PostponeWritingToDisk();
}

bool SimpleIndex::UpdateEntrySize(uint64_t entry_hash,
                                  base::StrictNumeric<uint32_t> entry_size) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return false;

  // Update the entry size.  If there was no change, then there is nothing
  // else to do here.
  if (!UpdateEntryIteratorSize(&it, entry_size))
    return true;

  PostponeWritingToDisk();
  StartEvictionIfNeeded();
  return true;
}

void SimpleIndex::EvictionDone(int result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // Ignore the result of eviction. We did our best.
  eviction_in_progress_ = false;
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToDone", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
}

// static
bool SimpleIndex::InsertInEntrySet(
    uint64_t entry_hash,
    const disk_cache::EntryMetadata& entry_metadata,
    EntrySet* entry_set) {
  DCHECK(entry_set);
  auto result = entry_set->emplace(entry_hash, entry_metadata);
  return result.second;
}

void SimpleIndex::InsertEntryForTesting(uint64_t entry_hash,
                                        const EntryMetadata& entry_metadata) {
  DCHECK(entries_set_.find(entry_hash) == entries_set_.end());
  if (InsertInEntrySet(entry_hash, entry_metadata, &entries_set_))
    cache_size_ += entry_metadata.GetEntrySize();
}

void SimpleIndex::PostponeWritingToDisk() {
  if (!initialized_)
    return;
  const int delay = app_on_background_ ? kWriteToDiskOnBackgroundDelayMSecs
                                       : kWriteToDiskDelayMSecs;
  // If the timer is already active, Start() will just Reset it, postponing it.
  write_to_disk_timer_.Start(FROM_HERE, base::Milliseconds(delay),
                             write_to_disk_cb_);
}

bool SimpleIndex::UpdateEntryIteratorSize(
    EntrySet::iterator* it,
    base::StrictNumeric<uint32_t> entry_size) {
  // Update the total cache size with the new entry size.
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_GE(cache_size_, (*it)->second.GetEntrySize());
  uint32_t original_size = (*it)->second.GetEntrySize();
  cache_size_ -= (*it)->second.GetEntrySize();
  (*it)->second.SetEntrySize(entry_size);
  // We use GetEntrySize to get consistent rounding.
  cache_size_ += (*it)->second.GetEntrySize();
  // Return true if the size of the entry actually changed.  Make sure to
  // compare the rounded values provided by GetEntrySize().
  return original_size != (*it)->second.GetEntrySize();
}

void SimpleIndex::MergeInitializingSet(
    std::unique_ptr<SimpleIndexLoadResult> load_result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  EntrySet* index_file_entries = &load_result->entries;

  for (uint64_t removed_entry : removed_entries_) {
    index_file_entries->erase(removed_entry);
  }
  removed_entries_.clear();

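  // Metadata already tracked in memory is newer than what was loaded from
  // the index file, so it overwrites the loaded values.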
  for (const auto& it : entries_set_) {
    const uint64_t entry_hash = it.first;
    std::pair<EntrySet::iterator, bool> insert_result =
        index_file_entries->insert(EntrySet::value_type(entry_hash,
                                                        EntryMetadata()));
    EntrySet::iterator& possibly_inserted_entry = insert_result.first;
    possibly_inserted_entry->second = it.second;
  }

  uint64_t merged_cache_size = 0;
  for (const auto& index_file_entry : *index_file_entries) {
    merged_cache_size += index_file_entry.second.GetEntrySize();
  }

  entries_set_.swap(*index_file_entries);
  cache_size_ = merged_cache_size;
  initialized_ = true;
  init_method_ = load_result->init_method;

  // The actual IO is asynchronous, so calling WriteToDisk() shouldn't slow the
  // merge down much.
  if (load_result->flush_required)
    WriteToDisk(INDEX_WRITE_REASON_STARTUP_MERGE);

  SIMPLE_CACHE_UMA(CUSTOM_COUNTS, "IndexNumEntriesOnInit", cache_type_,
                   entries_set_.size(), 0, 100000, 50);
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "CacheSizeOnInit", cache_type_,
      static_cast<base::HistogramBase::Sample>(cache_size_ / kBytesInKb));
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "MaxCacheSizeOnInit", cache_type_,
      static_cast<base::HistogramBase::Sample>(max_size_ / kBytesInKb));

  // Run all callbacks waiting for the index to come up.
  for (auto& callback : to_run_when_initialized_) {
    task_runner_->PostTask(FROM_HERE,
                           base::BindOnce(std::move(callback), net::OK));
  }
  to_run_when_initialized_.clear();
}

#if BUILDFLAG(IS_ANDROID)
void SimpleIndex::OnApplicationStateChange(
    base::android::ApplicationState state) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // For more info about android activities, see:
  // developer.android.com/training/basics/activity-lifecycle/pausing.html
  if (state == base::android::APPLICATION_STATE_HAS_RUNNING_ACTIVITIES) {
    app_on_background_ = false;
  } else if (state ==
      base::android::APPLICATION_STATE_HAS_STOPPED_ACTIVITIES) {
    app_on_background_ = true;
    WriteToDisk(INDEX_WRITE_REASON_ANDROID_STOPPED);
  }
}
#endif

void SimpleIndex::WriteToDisk(IndexWriteToDiskReason reason) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!initialized_)
    return;

  // Cancel any pending writes since we are about to write to disk now.
  write_to_disk_timer_.Stop();

  base::OnceClosure after_write;
  if (cleanup_tracker_) {
    // Make anyone synchronizing with our cleanup wait for the index to be
    // written back.
    after_write = base::DoNothingWithBoundArgs(cleanup_tracker_);
  }

  index_file_->WriteToDisk(cache_type_, reason, entries_set_, cache_size_,
                           std::move(after_write));
}

}  // namespace disk_cache