// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_index.h"

#include <algorithm>
#include <limits>
#include <string>
#include <utility>

#include "base/check_op.h"
#include "base/files/file_util.h"
#include "base/functional/bind.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_tokenizer.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/task_runner.h"
#include "base/time/time.h"
#include "base/trace_event/memory_usage_estimator.h"
#include "build/build_config.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index_delegate.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"

#if BUILDFLAG(IS_POSIX)
#include <sys/stat.h>
#include <sys/time.h>
#endif

namespace {

// How long to delay writing the index to disk after the last cache operation,
// in milliseconds.
const int kWriteToDiskDelayMSecs = 20000;
const int kWriteToDiskOnBackgroundDelayMSecs = 100;

// Divides the cache space into this many parts; eviction starts once less
// than one part of the space remains free.
const uint32_t kEvictionMarginDivisor = 20;

const uint32_t kBytesInKb = 1024;

// This is added to the size of each entry before using the size
// to determine which entries to evict first. It's basically an
// estimate of the filesystem overhead, but it also serves to flatten
// the curve so that 1-byte entries and 2-byte entries are basically
// treated the same.
static const int kEstimatedEntryOverhead = 512;
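// As a worked example (numbers chosen purely for illustration): with the
// overhead added, a 1-byte entry weighs 513 and a 2-byte entry weighs 514,
// so they sort essentially together, while a 100 KiB entry still dwarfs both.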

}  // namespace

namespace disk_cache {

EntryMetadata::EntryMetadata()
    : last_used_time_seconds_since_epoch_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {}

EntryMetadata::EntryMetadata(base::Time last_used_time,
                             base::StrictNumeric<uint32_t> entry_size)
    : last_used_time_seconds_since_epoch_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {
  SetEntrySize(entry_size);  // To round/pack properly.
  SetLastUsedTime(last_used_time);
}

EntryMetadata::EntryMetadata(int32_t trailer_prefetch_size,
                             base::StrictNumeric<uint32_t> entry_size)
    : trailer_prefetch_size_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {
  SetEntrySize(entry_size);  // To round/pack properly.
  SetTrailerPrefetchSize(trailer_prefetch_size);
}

base::Time EntryMetadata::GetLastUsedTime() const {
  // Preserve nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    return base::Time();

  return base::Time::UnixEpoch() +
         base::Seconds(last_used_time_seconds_since_epoch_);
}

void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) {
  // Preserve nullity.
  if (last_used_time.is_null()) {
    last_used_time_seconds_since_epoch_ = 0;
    return;
  }

  last_used_time_seconds_since_epoch_ = base::saturated_cast<uint32_t>(
      (last_used_time - base::Time::UnixEpoch()).InSeconds());
  // Avoid accidental nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    last_used_time_seconds_since_epoch_ = 1;
}

int32_t EntryMetadata::GetTrailerPrefetchSize() const {
  return trailer_prefetch_size_;
}

void EntryMetadata::SetTrailerPrefetchSize(int32_t size) {
  if (size <= 0)
    return;
  trailer_prefetch_size_ = size;
}

uint32_t EntryMetadata::GetEntrySize() const {
  return entry_size_256b_chunks_ << 8;
}

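// Entry sizes are stored as a count of 256-byte chunks, rounded up. As an
// illustrative example: SetEntrySize(1) stores (1 + 255) >> 8 = 1 chunk, and
// GetEntrySize() then reports 256, so sizes are tracked only at 256-byte
// granularity.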
void EntryMetadata::SetEntrySize(base::StrictNumeric<uint32_t> entry_size) {
  // This should not overflow since we limit entries to 1/8th of the cache.
  entry_size_256b_chunks_ = (static_cast<uint32_t>(entry_size) + 255) >> 8;
}

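// Each serialized record is one int64 (the last-used time, or in APP_CACHE
// mode the trailer prefetch size) followed by one uint64 packing the 256-byte
// chunk count with the in-memory data byte. Illustrative values: 3 chunks
// with in_memory_data_ == 0x02 pack to (3 << 8) | 0x02 == 0x302.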
void EntryMetadata::Serialize(net::CacheType cache_type,
                              base::Pickle* pickle) const {
  DCHECK(pickle);
  // If you modify the size of the pickle, be sure to update
  // kOnDiskSizeBytes.
  uint32_t packed_entry_info = (entry_size_256b_chunks_ << 8) | in_memory_data_;
  if (cache_type == net::APP_CACHE) {
    pickle->WriteInt64(trailer_prefetch_size_);
  } else {
    int64_t internal_last_used_time = GetLastUsedTime().ToInternalValue();
    pickle->WriteInt64(internal_last_used_time);
  }
  pickle->WriteUInt64(packed_entry_info);
}

bool EntryMetadata::Deserialize(net::CacheType cache_type,
                                base::PickleIterator* it,
                                bool has_entry_in_memory_data,
                                bool app_cache_has_trailer_prefetch_size) {
  DCHECK(it);
  int64_t tmp_time_or_prefetch_size;
  uint64_t tmp_entry_size;
  if (!it->ReadInt64(&tmp_time_or_prefetch_size) ||
      !it->ReadUInt64(&tmp_entry_size) ||
      tmp_entry_size > std::numeric_limits<uint32_t>::max())
    return false;
  if (cache_type == net::APP_CACHE) {
    if (app_cache_has_trailer_prefetch_size) {
      int32_t trailer_prefetch_size = 0;
      base::CheckedNumeric<int32_t> numeric_size(tmp_time_or_prefetch_size);
      if (numeric_size.AssignIfValid(&trailer_prefetch_size)) {
        SetTrailerPrefetchSize(trailer_prefetch_size);
      }
    }
  } else {
    SetLastUsedTime(base::Time::FromInternalValue(tmp_time_or_prefetch_size));
  }
  if (has_entry_in_memory_data) {
    // tmp_entry_size actually packs entry_size_256b_chunks_ and
    // in_memory_data_.
    SetEntrySize(static_cast<uint32_t>(tmp_entry_size & 0xFFFFFF00));
    SetInMemoryData(static_cast<uint8_t>(tmp_entry_size & 0xFF));
  } else {
    SetEntrySize(static_cast<uint32_t>(tmp_entry_size));
    SetInMemoryData(0);
  }
  return true;
}

SimpleIndex::SimpleIndex(
    const scoped_refptr<base::SequencedTaskRunner>& task_runner,
    scoped_refptr<BackendCleanupTracker> cleanup_tracker,
    SimpleIndexDelegate* delegate,
    net::CacheType cache_type,
    std::unique_ptr<SimpleIndexFile> index_file)
    : cleanup_tracker_(std::move(cleanup_tracker)),
      delegate_(delegate),
      cache_type_(cache_type),
      index_file_(std::move(index_file)),
      task_runner_(task_runner),
      // Creating the callback once so it is reused every time
      // write_to_disk_timer_.Start() is called.
      write_to_disk_cb_(base::BindRepeating(&SimpleIndex::WriteToDisk,
                                            AsWeakPtr(),
                                            INDEX_WRITE_REASON_IDLE)) {}

SimpleIndex::~SimpleIndex() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // Fail all callbacks waiting for the index to come up.
  for (auto& callback : to_run_when_initialized_) {
    std::move(callback).Run(net::ERR_ABORTED);
  }
}

void SimpleIndex::Initialize(base::Time cache_mtime) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

#if BUILDFLAG(IS_ANDROID)
  if (app_status_listener_) {
    app_status_listener_->SetCallback(base::BindRepeating(
        &SimpleIndex::OnApplicationStateChange, AsWeakPtr()));
  } else if (base::android::IsVMInitialized()) {
    owned_app_status_listener_ =
        base::android::ApplicationStatusListener::New(base::BindRepeating(
            &SimpleIndex::OnApplicationStateChange, AsWeakPtr()));
    app_status_listener_ = owned_app_status_listener_.get();
  }
#endif

  auto load_result = std::make_unique<SimpleIndexLoadResult>();
  auto* load_result_ptr = load_result.get();
  index_file_->LoadIndexEntries(
      cache_mtime,
      base::BindOnce(&SimpleIndex::MergeInitializingSet, AsWeakPtr(),
                     std::move(load_result)),
      load_result_ptr);
}

void SimpleIndex::SetMaxSize(uint64_t max_bytes) {
  // Zero size means use the default.
  if (max_bytes) {
    max_size_ = max_bytes;
    high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor;
    low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor);
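    // e.g. with max_bytes = 100 MiB and kEvictionMarginDivisor = 20,
    // eviction kicks in once the cache exceeds the 95 MiB high watermark and
    // trims it back down to the 90 MiB low watermark.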
  }
}

void SimpleIndex::ExecuteWhenReady(net::CompletionOnceCallback task) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (initialized_)
    task_runner_->PostTask(FROM_HERE, base::BindOnce(std::move(task), net::OK));
  else
    to_run_when_initialized_.push_back(std::move(task));
}

std::unique_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
    base::Time initial_time,
    base::Time end_time) {
  DCHECK_EQ(true, initialized_);

  // The net::APP_CACHE mode does not track access times. Assert that external
  // consumers are not relying on access time ranges.
  DCHECK(cache_type_ != net::APP_CACHE ||
         (initial_time.is_null() && end_time.is_null()));

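  // Stored timestamps are coarser than base::Time, so widen the requested
  // interval by one epsilon on each side; otherwise entries whose stored time
  // was rounded across a boundary could be missed.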
  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();
  DCHECK(end_time >= initial_time);

  auto ret_hashes = std::make_unique<HashList>();
  for (const auto& entry : entries_set_) {
    const EntryMetadata& metadata = entry.second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < end_time)
      ret_hashes->push_back(entry.first);
  }
  return ret_hashes;
}

std::unique_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
  return GetEntriesBetween(base::Time(), base::Time());
}

int32_t SimpleIndex::GetEntryCount() const {
  // TODO(pasko): return a meaningful initial estimate before initialized.
  return entries_set_.size();
}

uint64_t SimpleIndex::GetCacheSize() const {
  DCHECK(initialized_);
  return cache_size_;
}

uint64_t SimpleIndex::GetCacheSizeBetween(base::Time initial_time,
                                          base::Time end_time) const {
  DCHECK_EQ(true, initialized_);

  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();

  DCHECK(end_time >= initial_time);
  uint64_t size = 0;
  for (const auto& entry : entries_set_) {
    const EntryMetadata& metadata = entry.second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < end_time)
      size += metadata.GetEntrySize();
  }
  return size;
}

base::Time SimpleIndex::GetLastUsedTime(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_NE(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return base::Time();
  return it->second.GetLastUsedTime();
}

void SimpleIndex::SetLastUsedTimeForTest(uint64_t entry_hash,
                                         const base::Time last_used) {
  auto it = entries_set_.find(entry_hash);
  DCHECK(it != entries_set_.end());
  it->second.SetLastUsedTime(last_used);
}

bool SimpleIndex::HasPendingWrite() const {
  return write_to_disk_timer_.IsRunning();
}

void SimpleIndex::Insert(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Upon insert we don't yet know the size of the entry. It is updated later,
  // once SimpleEntryImpl finishes opening or creating the new entry and
  // UpdateEntrySize() is called.
  bool inserted = false;
  if (cache_type_ == net::APP_CACHE) {
    inserted =
        InsertInEntrySet(entry_hash, EntryMetadata(-1, 0u), &entries_set_);
  } else {
    inserted = InsertInEntrySet(
        entry_hash, EntryMetadata(base::Time::Now(), 0u), &entries_set_);
  }
  if (!initialized_)
    removed_entries_.erase(entry_hash);
  if (inserted)
    PostponeWritingToDisk();
}

void SimpleIndex::Remove(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  bool need_write = false;
  auto it = entries_set_.find(entry_hash);
  if (it != entries_set_.end()) {
    UpdateEntryIteratorSize(&it, 0u);
    entries_set_.erase(it);
    need_write = true;
  }

  if (!initialized_)
    removed_entries_.insert(entry_hash);

  if (need_write)
    PostponeWritingToDisk();
}

bool SimpleIndex::Has(uint64_t hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // If not initialized, always return true, forcing it to go to the disk.
  return !initialized_ || entries_set_.count(hash) > 0;
}

uint8_t SimpleIndex::GetEntryInMemoryData(uint64_t entry_hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return 0;
  return it->second.GetInMemoryData();
}

void SimpleIndex::SetEntryInMemoryData(uint64_t entry_hash, uint8_t value) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return;
  return it->second.SetInMemoryData(value);
}

bool SimpleIndex::UseIfExists(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Always update the last used time, even if it is during initialization.
  // It will be merged later.
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    // If not initialized, always return true, forcing it to go to the disk.
    return !initialized_;
  // We do not need to track access times in APP_CACHE mode.
  if (cache_type_ == net::APP_CACHE)
    return true;
  it->second.SetLastUsedTime(base::Time::Now());
  PostponeWritingToDisk();
  return true;
}

void SimpleIndex::StartEvictionIfNeeded() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (eviction_in_progress_ || cache_size_ <= high_watermark_)
    return;
  // Take all live key hashes from the index and sort them by time.
  eviction_in_progress_ = true;
  eviction_start_time_ = base::TimeTicks::Now();

  bool use_size_heuristic =
      (cache_type_ != net::GENERATED_BYTE_CODE_CACHE &&
       cache_type_ != net::GENERATED_WEBUI_BYTE_CODE_CACHE);

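  // Each entry's eviction score is its idle time in seconds, optionally
  // multiplied by (entry size + kEstimatedEntryOverhead). As an illustrative
  // example: an entry idle for 1000 seconds holding 1536 bytes scores
  // 1000 * (1536 + 512) = 2,048,000; higher scores are evicted first.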
  // Flatten for sorting.
  std::vector<std::pair<uint64_t, const EntrySet::value_type*>> entries;
  entries.reserve(entries_set_.size());
  uint32_t now = (base::Time::Now() - base::Time::UnixEpoch()).InSeconds();
  for (EntrySet::const_iterator i = entries_set_.begin();
       i != entries_set_.end(); ++i) {
    uint64_t sort_value = now - i->second.RawTimeForSorting();
    // See crbug.com/736437 for context.
    //
    // Will not overflow since we're multiplying two 32-bit values and storing
    // them in a 64-bit variable.
    if (use_size_heuristic)
      sort_value *= i->second.GetEntrySize() + kEstimatedEntryOverhead;
    // Subtract from the maximum so that std::sort's default ascending order
    // puts the highest-scoring entries first, with no custom comparator.
    entries.emplace_back(std::numeric_limits<uint64_t>::max() - sort_value,
                         &*i);
  }

  uint64_t evicted_so_far_size = 0;
  const uint64_t amount_to_evict = cache_size_ - low_watermark_;
  std::vector<uint64_t> entry_hashes;
  std::sort(entries.begin(), entries.end());
  for (const auto& score_metadata_pair : entries) {
    if (evicted_so_far_size >= amount_to_evict)
      break;
    evicted_so_far_size += score_metadata_pair.second->second.GetEntrySize();
    entry_hashes.push_back(score_metadata_pair.second->first);
  }

  SIMPLE_CACHE_UMA(COUNTS_1M,
                   "Eviction.EntryCount", cache_type_, entry_hashes.size());
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToSelectEntries", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);

  delegate_->DoomEntries(
      &entry_hashes, base::BindOnce(&SimpleIndex::EvictionDone, AsWeakPtr()));
}

int32_t SimpleIndex::GetTrailerPrefetchSize(uint64_t entry_hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return -1;
  return it->second.GetTrailerPrefetchSize();
}

void SimpleIndex::SetTrailerPrefetchSize(uint64_t entry_hash, int32_t size) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return;
  int32_t original_size = it->second.GetTrailerPrefetchSize();
  it->second.SetTrailerPrefetchSize(size);
  if (original_size != it->second.GetTrailerPrefetchSize())
    PostponeWritingToDisk();
}

bool SimpleIndex::UpdateEntrySize(uint64_t entry_hash,
                                  base::StrictNumeric<uint32_t> entry_size) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return false;

  // Update the entry size. If there was no change, then there is nothing
  // else to do here.
  if (!UpdateEntryIteratorSize(&it, entry_size))
    return true;

  PostponeWritingToDisk();
  StartEvictionIfNeeded();
  return true;
}

void SimpleIndex::EvictionDone(int result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // Ignore the result of eviction. We did our best.
  eviction_in_progress_ = false;
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToDone", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
}

// static
bool SimpleIndex::InsertInEntrySet(
    uint64_t entry_hash,
    const disk_cache::EntryMetadata& entry_metadata,
    EntrySet* entry_set) {
  DCHECK(entry_set);
  auto result = entry_set->insert(std::make_pair(entry_hash, entry_metadata));
  return result.second;
}

void SimpleIndex::InsertEntryForTesting(uint64_t entry_hash,
                                        const EntryMetadata& entry_metadata) {
  DCHECK(entries_set_.find(entry_hash) == entries_set_.end());
  if (InsertInEntrySet(entry_hash, entry_metadata, &entries_set_))
    cache_size_ += entry_metadata.GetEntrySize();
}

void SimpleIndex::PostponeWritingToDisk() {
  if (!initialized_)
    return;
  const int delay = app_on_background_ ? kWriteToDiskOnBackgroundDelayMSecs
                                       : kWriteToDiskDelayMSecs;
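  // Illustrative timing: in the foreground the write is debounced by 20
  // seconds of idle time; when backgrounded (Android) the index is flushed
  // after only 100 ms, presumably because a backgrounded process can be
  // killed at any time.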
  // If the timer is already running, Start() simply resets it, postponing the
  // write further.
  write_to_disk_timer_.Start(FROM_HERE, base::Milliseconds(delay),
                             write_to_disk_cb_);
}

bool SimpleIndex::UpdateEntryIteratorSize(
    EntrySet::iterator* it,
    base::StrictNumeric<uint32_t> entry_size) {
  // Update the total cache size with the new entry size.
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_GE(cache_size_, (*it)->second.GetEntrySize());
  uint32_t original_size = (*it)->second.GetEntrySize();
  cache_size_ -= (*it)->second.GetEntrySize();
  (*it)->second.SetEntrySize(entry_size);
  // We use GetEntrySize to get consistent rounding.
  cache_size_ += (*it)->second.GetEntrySize();
  // Return true if the size of the entry actually changed. Make sure to
  // compare the rounded values provided by GetEntrySize().
  return original_size != (*it)->second.GetEntrySize();
}

void SimpleIndex::MergeInitializingSet(
    std::unique_ptr<SimpleIndexLoadResult> load_result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  EntrySet* index_file_entries = &load_result->entries;

  for (uint64_t removed_entry : removed_entries_) {
    index_file_entries->erase(removed_entry);
  }
  removed_entries_.clear();

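  // Entries touched in memory while the load was in flight win over whatever
  // the index file recorded for the same hash: the insert below either adds
  // the hash or finds the existing record, and the assignment then overwrites
  // it with the in-memory metadata.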
  for (const auto& it : entries_set_) {
    const uint64_t entry_hash = it.first;
    std::pair<EntrySet::iterator, bool> insert_result =
        index_file_entries->insert(EntrySet::value_type(entry_hash,
                                                        EntryMetadata()));
    EntrySet::iterator& possibly_inserted_entry = insert_result.first;
    possibly_inserted_entry->second = it.second;
  }

  uint64_t merged_cache_size = 0;
  for (const auto& index_file_entry : *index_file_entries) {
    merged_cache_size += index_file_entry.second.GetEntrySize();
  }

  entries_set_.swap(*index_file_entries);
  cache_size_ = merged_cache_size;
  initialized_ = true;
  init_method_ = load_result->init_method;

  // The actual IO is asynchronous, so calling WriteToDisk() shouldn't slow the
  // merge down much.
  if (load_result->flush_required)
    WriteToDisk(INDEX_WRITE_REASON_STARTUP_MERGE);

  SIMPLE_CACHE_UMA(CUSTOM_COUNTS, "IndexNumEntriesOnInit", cache_type_,
                   entries_set_.size(), 0, 100000, 50);
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "CacheSizeOnInit", cache_type_,
      static_cast<base::HistogramBase::Sample>(cache_size_ / kBytesInKb));
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "MaxCacheSizeOnInit", cache_type_,
      static_cast<base::HistogramBase::Sample>(max_size_ / kBytesInKb));

  // Run all callbacks waiting for the index to come up.
  for (auto& callback : to_run_when_initialized_) {
    task_runner_->PostTask(FROM_HERE,
                           base::BindOnce(std::move(callback), net::OK));
  }
  to_run_when_initialized_.clear();
}

#if BUILDFLAG(IS_ANDROID)
void SimpleIndex::OnApplicationStateChange(
    base::android::ApplicationState state) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // For more info about android activities, see:
  // developer.android.com/training/basics/activity-lifecycle/pausing.html
  if (state == base::android::APPLICATION_STATE_HAS_RUNNING_ACTIVITIES) {
    app_on_background_ = false;
  } else if (state ==
             base::android::APPLICATION_STATE_HAS_STOPPED_ACTIVITIES) {
    app_on_background_ = true;
    WriteToDisk(INDEX_WRITE_REASON_ANDROID_STOPPED);
  }
}
#endif

void SimpleIndex::WriteToDisk(IndexWriteToDiskReason reason) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!initialized_)
    return;

  // Cancel any pending writes since we are about to write to disk now.
  write_to_disk_timer_.AbandonAndStop();

  base::OnceClosure after_write;
  if (cleanup_tracker_) {
    // Make anyone synchronizing with our cleanup wait for the index to be
    // written back.
    after_write = base::DoNothingWithBoundArgs(cleanup_tracker_);
  }

  index_file_->WriteToDisk(cache_type_, reason, entries_set_, cache_size_,
                           std::move(after_write));
}

}  // namespace disk_cache