// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_index.h"

#include <stdint.h>

#include <memory>
#include <utility>
#include <vector>

#include "base/feature_list.h"
#include "base/files/scoped_temp_dir.h"
#include "base/functional/bind.h"
#include "base/hash/hash.h"
#include "base/memory/raw_ptr.h"
#include "base/pickle.h"
#include "base/strings/stringprintf.h"
#include "base/task/task_runner.h"
#include "base/test/mock_entropy_provider.h"
#include "base/test/scoped_feature_list.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
#include "net/base/features.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/disk_cache.h"
#include "net/disk_cache/memory_entry_data_hints.h"
#include "net/disk_cache/simple/simple_index_delegate.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/test/test_with_task_environment.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace disk_cache {
namespace {

const base::Time kTestLastUsedTime = base::Time::UnixEpoch() + base::Days(20);
const uint32_t kTestEntrySize = 789;
const uint8_t kTestEntryMemoryData = 123;

// Rounds a byte count up to the 256-byte granularity at which the index
// tracks entry sizes.
uint32_t RoundSize(uint32_t in) {
  return (in + 0xFFu) & 0xFFFFFF00u;
}

}  // namespace

class EntryMetadataTest : public testing::Test {
 public:
  EntryMetadata NewEntryMetadataWithValues() {
    EntryMetadata entry(kTestLastUsedTime, kTestEntrySize);
    entry.SetInMemoryData(kTestEntryMemoryData);
    return entry;
  }

  void CheckEntryMetadataValues(const EntryMetadata& entry_metadata) {
    EXPECT_LT(kTestLastUsedTime - base::Seconds(2),
              entry_metadata.GetLastUsedTime());
    EXPECT_GT(kTestLastUsedTime + base::Seconds(2),
              entry_metadata.GetLastUsedTime());
    EXPECT_EQ(RoundSize(kTestEntrySize), entry_metadata.GetEntrySize());
    EXPECT_EQ(kTestEntryMemoryData, entry_metadata.GetInMemoryData());
  }
};

class MockSimpleIndexFile final : public SimpleIndexFile {
 public:
  explicit MockSimpleIndexFile(net::CacheType cache_type)
      : SimpleIndexFile(nullptr,
                        base::MakeRefCounted<TrivialFileOperationsFactory>(),
                        cache_type,
                        base::FilePath()) {}

  void LoadIndexEntries(base::Time cache_last_modified,
                        base::OnceClosure callback,
                        SimpleIndexLoadResult* out_load_result) override {
    load_callback_ = std::move(callback);
    load_result_ = out_load_result;
    ++load_index_entries_calls_;
  }

  void WriteToDisk(net::CacheType cache_type,
                   SimpleIndex::IndexWriteToDiskReason reason,
                   const SimpleIndex::EntrySet& entry_set,
                   uint64_t cache_size,
                   base::OnceClosure callback) override {
    disk_writes_++;
    disk_write_entry_set_ = entry_set;
  }

  void GetAndResetDiskWriteEntrySet(SimpleIndex::EntrySet* entry_set) {
    entry_set->swap(disk_write_entry_set_);
  }

  void RunLoadCallback() {
    // Clear dangling reference since callback may destroy `load_result_`.
    load_result_ = nullptr;
    std::move(load_callback_).Run();
  }

  SimpleIndexLoadResult* load_result() const { return load_result_; }
  int load_index_entries_calls() const { return load_index_entries_calls_; }
  int disk_writes() const { return disk_writes_; }

  base::WeakPtr<MockSimpleIndexFile> AsWeakPtr() {
    return weak_ptr_factory_.GetWeakPtr();
  }

 private:
  base::OnceClosure load_callback_;
  raw_ptr<SimpleIndexLoadResult> load_result_ = nullptr;
  int load_index_entries_calls_ = 0;
  int disk_writes_ = 0;
  SimpleIndex::EntrySet disk_write_entry_set_;
  base::WeakPtrFactory<MockSimpleIndexFile> weak_ptr_factory_{this};
};

class SimpleIndexTest : public net::TestWithTaskEnvironment,
                        public SimpleIndexDelegate {
 protected:
  SimpleIndexTest() : hashes_(base::BindRepeating(&HashesInitializer)) {}

  static uint64_t HashesInitializer(size_t hash_index) {
    return disk_cache::simple_util::GetEntryHashKey(
        base::StringPrintf("key%d", static_cast<int>(hash_index)));
  }

  void SetUp() override {
    auto index_file = std::make_unique<MockSimpleIndexFile>(CacheType());
    index_file_ = index_file->AsWeakPtr();
    index_ = std::make_unique<SimpleIndex>(/* io_thread = */ nullptr,
                                           /* cleanup_tracker = */ nullptr,
                                           this, CacheType(),
                                           std::move(index_file));

    index_->Initialize(base::Time());
  }

  void WaitForTimeChange() {
    const base::Time initial_time = base::Time::Now();
    do {
      base::PlatformThread::YieldCurrentThread();
    } while (base::Time::Now() - initial_time < base::Seconds(1));
  }

  // From SimpleIndexDelegate:
  void DoomEntries(std::vector<uint64_t>* entry_hashes,
                   net::CompletionOnceCallback callback) override {
    for (const uint64_t& entry_hash : *entry_hashes)
      index_->Remove(entry_hash);
    last_doom_entry_hashes_ = *entry_hashes;
    ++doom_entries_calls_;
  }

  // Redirect to allow single "friend" declaration in base class.
  bool GetEntryForTesting(uint64_t key, EntryMetadata* metadata) {
    auto it = index_->entries_set_.find(key);
    if (index_->entries_set_.end() == it)
      return false;
    *metadata = it->second;
    return true;
  }

  void InsertIntoIndexFileReturn(uint64_t hash_key,
                                 base::Time last_used_time,
                                 int entry_size) {
    index_file_->load_result()->entries.emplace(
        hash_key, EntryMetadata(last_used_time,
                                base::checked_cast<uint32_t>(entry_size)));
  }

  void InsertIntoIndexFileWithPrioritizeCachingFlagReturn(
      uint64_t hash_key,
      base::Time last_used_time,
      int entry_size) {
    EntryMetadata entry_meta_data = EntryMetadata(
        last_used_time, base::checked_cast<uint32_t>(entry_size));
    entry_meta_data.SetInMemoryData(HINT_HIGH_PRIORITY);
    index_file_->load_result()->entries.emplace(hash_key, entry_meta_data);
  }

  void ReturnIndexFile() {
    index_file_->load_result()->did_load = true;
    index_file_->RunLoadCallback();
  }

  // Non-const for timer manipulation.
  SimpleIndex* index() { return index_.get(); }
  const MockSimpleIndexFile* index_file() const { return index_file_.get(); }

  const std::vector<uint64_t>& last_doom_entry_hashes() const {
    return last_doom_entry_hashes_;
  }

  int doom_entries_calls() const { return doom_entries_calls_; }

  virtual net::CacheType CacheType() const { return net::DISK_CACHE; }

  const simple_util::ImmutableArray<uint64_t, 16> hashes_;
  std::unique_ptr<SimpleIndex> index_;
  base::WeakPtr<MockSimpleIndexFile> index_file_;

  std::vector<uint64_t> last_doom_entry_hashes_;
  int doom_entries_calls_ = 0;
};

class SimpleIndexAppCacheTest : public SimpleIndexTest {
 protected:
  net::CacheType CacheType() const override { return net::APP_CACHE; }
};

class SimpleIndexCodeCacheTest : public SimpleIndexTest {
 protected:
  net::CacheType CacheType() const override {
    return net::GENERATED_BYTE_CODE_CACHE;
  }
};

TEST_F(EntryMetadataTest, Basics) {
  EntryMetadata entry_metadata;
  EXPECT_EQ(base::Time(), entry_metadata.GetLastUsedTime());
  EXPECT_EQ(0u, entry_metadata.GetEntrySize());
  EXPECT_EQ(0u, entry_metadata.GetInMemoryData());

  entry_metadata = NewEntryMetadataWithValues();
  CheckEntryMetadataValues(entry_metadata);

  const base::Time new_time = base::Time::Now();
  entry_metadata.SetLastUsedTime(new_time);

  EXPECT_LT(new_time - base::Seconds(2), entry_metadata.GetLastUsedTime());
  EXPECT_GT(new_time + base::Seconds(2), entry_metadata.GetLastUsedTime());
}

// Tests that setting an unusually small/large last used time results in
// truncation (rather than crashing).
TEST_F(EntryMetadataTest, SaturatedLastUsedTime) {
  EntryMetadata entry_metadata;

  // Set a time that is too large to be represented internally as a 32-bit
  // unix timestamp. Will saturate to a large timestamp (in year 2106).
  entry_metadata.SetLastUsedTime(base::Time::Max());
  EXPECT_EQ(INT64_C(15939440895000000),
            entry_metadata.GetLastUsedTime().ToInternalValue());

  // Set a time that is too small to be represented by a unix timestamp
  // (before 1970).
  entry_metadata.SetLastUsedTime(
      base::Time::FromInternalValue(7u));  // This is a date in 1601.
  EXPECT_EQ(base::Time::UnixEpoch() + base::Seconds(1),
            entry_metadata.GetLastUsedTime());
}

TEST_F(EntryMetadataTest, Serialize) {
  EntryMetadata entry_metadata = NewEntryMetadataWithValues();

  base::Pickle pickle;
  entry_metadata.Serialize(net::DISK_CACHE, &pickle);

  base::PickleIterator it(pickle);
  EntryMetadata new_entry_metadata;
  new_entry_metadata.Deserialize(net::DISK_CACHE, &it, true, true);
  CheckEntryMetadataValues(new_entry_metadata);

  // Test reading of the old format --- the modern serialization of the entry
  // above corresponds, in the older format, to an entry with size =
  // RoundSize(kTestEntrySize) | kTestEntryMemoryData, which then gets
  // rounded again when stored by EntryMetadata.
  base::PickleIterator it2(pickle);
  EntryMetadata new_entry_metadata2;
  new_entry_metadata2.Deserialize(net::DISK_CACHE, &it2, false, false);
  EXPECT_EQ(RoundSize(RoundSize(kTestEntrySize) | kTestEntryMemoryData),
            new_entry_metadata2.GetEntrySize());
  EXPECT_EQ(0, new_entry_metadata2.GetInMemoryData());
}

TEST_F(SimpleIndexTest, IndexSizeCorrectOnMerge) {
  const unsigned int kSizeResolution = 256u;
  index()->SetMaxSize(100 * kSizeResolution);
  index()->Insert(hashes_.at<2>());
  index()->UpdateEntrySize(hashes_.at<2>(), 2u * kSizeResolution);
  index()->Insert(hashes_.at<3>());
  index()->UpdateEntrySize(hashes_.at<3>(), 3u * kSizeResolution);
  index()->Insert(hashes_.at<4>());
  index()->UpdateEntrySize(hashes_.at<4>(), 4u * kSizeResolution);
  EXPECT_EQ(9u * kSizeResolution, index()->cache_size_);
  {
    auto result = std::make_unique<SimpleIndexLoadResult>();
    result->did_load = true;
    index()->MergeInitializingSet(std::move(result));
  }
  EXPECT_EQ(9u * kSizeResolution, index()->cache_size_);
  {
    auto result = std::make_unique<SimpleIndexLoadResult>();
    result->did_load = true;
    const uint64_t new_hash_key = hashes_.at<11>();
    result->entries.emplace(
        new_hash_key, EntryMetadata(base::Time::Now(), 11u * kSizeResolution));
    const uint64_t redundant_hash_key = hashes_.at<4>();
    result->entries.emplace(
        redundant_hash_key,
        EntryMetadata(base::Time::Now(), 4u * kSizeResolution));
    index()->MergeInitializingSet(std::move(result));
  }
  EXPECT_EQ((2u + 3u + 4u + 11u) * kSizeResolution, index()->cache_size_);
}

// State of index changes as expected with an insert and a remove.
TEST_F(SimpleIndexTest, BasicInsertRemove) {
  // Confirm blank state.
  EntryMetadata metadata;
  EXPECT_EQ(base::Time(), metadata.GetLastUsedTime());
  EXPECT_EQ(0U, metadata.GetEntrySize());

  // Confirm state after insert.
  index()->Insert(hashes_.at<1>());
  ASSERT_TRUE(GetEntryForTesting(hashes_.at<1>(), &metadata));
  base::Time now(base::Time::Now());
  EXPECT_LT(now - base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_GT(now + base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_EQ(0U, metadata.GetEntrySize());

  // Confirm state after remove.
  metadata = EntryMetadata();
  index()->Remove(hashes_.at<1>());
  EXPECT_FALSE(GetEntryForTesting(hashes_.at<1>(), &metadata));
  EXPECT_EQ(base::Time(), metadata.GetLastUsedTime());
  EXPECT_EQ(0U, metadata.GetEntrySize());
}

TEST_F(SimpleIndexTest, Has) {
  // Confirm the base index has dispatched the request for index entries.
  EXPECT_TRUE(index_file_.get());
  EXPECT_EQ(1, index_file_->load_index_entries_calls());

  // Confirm "Has()" always returns true before the callback is called.
  const uint64_t kHash1 = hashes_.at<1>();
  EXPECT_TRUE(index()->Has(kHash1));
  index()->Insert(kHash1);
  EXPECT_TRUE(index()->Has(kHash1));
  index()->Remove(kHash1);
  // TODO(morlovich): Maybe return false on explicitly removed entries?
  EXPECT_TRUE(index()->Has(kHash1));

  ReturnIndexFile();

  // Confirm "Has()" returns conditionally now.
  EXPECT_FALSE(index()->Has(kHash1));
  index()->Insert(kHash1);
  EXPECT_TRUE(index()->Has(kHash1));
  index()->Remove(kHash1);
}

TEST_F(SimpleIndexTest, UseIfExists) {
  // Confirm the base index has dispatched the request for index entries.
  EXPECT_TRUE(index_file_.get());
  EXPECT_EQ(1, index_file_->load_index_entries_calls());

  // Confirm "UseIfExists()" always returns true before the callback is called
  // and updates mod time if the entry was really there.
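  // (WaitForTimeChange() below spins until the wall clock has advanced, so a
  // refreshed last-used time can be distinguished from the original one.)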
  const uint64_t kHash1 = hashes_.at<1>();
  EntryMetadata metadata1, metadata2;
  EXPECT_TRUE(index()->UseIfExists(kHash1));
  EXPECT_FALSE(GetEntryForTesting(kHash1, &metadata1));
  index()->Insert(kHash1);
  EXPECT_TRUE(index()->UseIfExists(kHash1));
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata1));
  WaitForTimeChange();
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata2));
  EXPECT_EQ(metadata1.GetLastUsedTime(), metadata2.GetLastUsedTime());
  EXPECT_TRUE(index()->UseIfExists(kHash1));
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata2));
  EXPECT_LT(metadata1.GetLastUsedTime(), metadata2.GetLastUsedTime());
  index()->Remove(kHash1);
  EXPECT_TRUE(index()->UseIfExists(kHash1));

  ReturnIndexFile();

  // Confirm "UseIfExists()" returns conditionally now.
  EXPECT_FALSE(index()->UseIfExists(kHash1));
  EXPECT_FALSE(GetEntryForTesting(kHash1, &metadata1));
  index()->Insert(kHash1);
  EXPECT_TRUE(index()->UseIfExists(kHash1));
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata1));
  WaitForTimeChange();
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata2));
  EXPECT_EQ(metadata1.GetLastUsedTime(), metadata2.GetLastUsedTime());
  EXPECT_TRUE(index()->UseIfExists(kHash1));
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata2));
  EXPECT_LT(metadata1.GetLastUsedTime(), metadata2.GetLastUsedTime());
  index()->Remove(kHash1);
  EXPECT_FALSE(index()->UseIfExists(kHash1));
}

TEST_F(SimpleIndexTest, UpdateEntrySize) {
  base::Time now(base::Time::Now());

  index()->SetMaxSize(1000);

  const uint64_t kHash1 = hashes_.at<1>();
  InsertIntoIndexFileReturn(kHash1, now - base::Days(2), 475);
  ReturnIndexFile();

  EntryMetadata metadata;
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata));
  EXPECT_LT(now - base::Days(2) - base::Seconds(1), metadata.GetLastUsedTime());
  EXPECT_GT(now - base::Days(2) + base::Seconds(1), metadata.GetLastUsedTime());
  EXPECT_EQ(RoundSize(475u), metadata.GetEntrySize());

  index()->UpdateEntrySize(kHash1, 600u);
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata));
  EXPECT_EQ(RoundSize(600u), metadata.GetEntrySize());
  EXPECT_EQ(1, index()->GetEntryCount());
}

TEST_F(SimpleIndexTest, GetEntryCount) {
  EXPECT_EQ(0, index()->GetEntryCount());
  index()->Insert(hashes_.at<1>());
  EXPECT_EQ(1, index()->GetEntryCount());
  index()->Insert(hashes_.at<2>());
  EXPECT_EQ(2, index()->GetEntryCount());
  index()->Insert(hashes_.at<3>());
  EXPECT_EQ(3, index()->GetEntryCount());
  index()->Insert(hashes_.at<3>());
  EXPECT_EQ(3, index()->GetEntryCount());
  index()->Remove(hashes_.at<2>());
  EXPECT_EQ(2, index()->GetEntryCount());
  index()->Insert(hashes_.at<4>());
  EXPECT_EQ(3, index()->GetEntryCount());
  index()->Remove(hashes_.at<3>());
  EXPECT_EQ(2, index()->GetEntryCount());
  index()->Remove(hashes_.at<3>());
  EXPECT_EQ(2, index()->GetEntryCount());
  index()->Remove(hashes_.at<1>());
  EXPECT_EQ(1, index()->GetEntryCount());
  index()->Remove(hashes_.at<4>());
  EXPECT_EQ(0, index()->GetEntryCount());
}

// Confirm that we get the results we expect from a simple init.
TEST_F(SimpleIndexTest, BasicInit) {
  base::Time now(base::Time::Now());

  InsertIntoIndexFileReturn(hashes_.at<1>(), now - base::Days(2), 10u);
  InsertIntoIndexFileReturn(hashes_.at<2>(), now - base::Days(3), 1000u);

  ReturnIndexFile();

  EntryMetadata metadata;
  EXPECT_TRUE(GetEntryForTesting(hashes_.at<1>(), &metadata));
  EXPECT_EQ(metadata.GetLastUsedTime(),
            index()->GetLastUsedTime(hashes_.at<1>()));
  EXPECT_LT(now - base::Days(2) - base::Seconds(1), metadata.GetLastUsedTime());
  EXPECT_GT(now - base::Days(2) + base::Seconds(1), metadata.GetLastUsedTime());
  EXPECT_EQ(RoundSize(10u), metadata.GetEntrySize());
  EXPECT_TRUE(GetEntryForTesting(hashes_.at<2>(), &metadata));
  EXPECT_EQ(metadata.GetLastUsedTime(),
            index()->GetLastUsedTime(hashes_.at<2>()));
  EXPECT_LT(now - base::Days(3) - base::Seconds(1), metadata.GetLastUsedTime());
  EXPECT_GT(now - base::Days(3) + base::Seconds(1), metadata.GetLastUsedTime());
  EXPECT_EQ(RoundSize(1000u), metadata.GetEntrySize());
  EXPECT_EQ(base::Time(), index()->GetLastUsedTime(hashes_.at<3>()));
}

// Remove something that's going to come in from the loaded index.
TEST_F(SimpleIndexTest, RemoveBeforeInit) {
  const uint64_t kHash1 = hashes_.at<1>();
  index()->Remove(kHash1);

  InsertIntoIndexFileReturn(kHash1, base::Time::Now() - base::Days(2), 10u);
  ReturnIndexFile();

  EXPECT_FALSE(index()->Has(kHash1));
}

// Insert something that's going to come in from the loaded index; correct
// result?
TEST_F(SimpleIndexTest, InsertBeforeInit) {
  const uint64_t kHash1 = hashes_.at<1>();
  index()->Insert(kHash1);

  InsertIntoIndexFileReturn(kHash1, base::Time::Now() - base::Days(2), 10u);
  ReturnIndexFile();

  EntryMetadata metadata;
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata));
  base::Time now(base::Time::Now());
  EXPECT_LT(now - base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_GT(now + base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_EQ(0U, metadata.GetEntrySize());
}

// Insert and Remove something that's going to come in from the loaded index.
TEST_F(SimpleIndexTest, InsertRemoveBeforeInit) {
  const uint64_t kHash1 = hashes_.at<1>();
  index()->Insert(kHash1);
  index()->Remove(kHash1);

  InsertIntoIndexFileReturn(kHash1, base::Time::Now() - base::Days(2), 10u);
  ReturnIndexFile();

  EXPECT_FALSE(index()->Has(kHash1));
}

// Insert and Remove something that's going to come in from the loaded index.
TEST_F(SimpleIndexTest, RemoveInsertBeforeInit) {
  const uint64_t kHash1 = hashes_.at<1>();
  index()->Remove(kHash1);
  index()->Insert(kHash1);

  InsertIntoIndexFileReturn(kHash1, base::Time::Now() - base::Days(2), 10u);
  ReturnIndexFile();

  EntryMetadata metadata;
  EXPECT_TRUE(GetEntryForTesting(kHash1, &metadata));
  base::Time now(base::Time::Now());
  EXPECT_LT(now - base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_GT(now + base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_EQ(0U, metadata.GetEntrySize());
}

// Do all above tests at once + a non-conflict to test for cross-key
// interactions.
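// As the tests above establish, an Insert() or Remove() issued before
// initialization completes takes precedence over the corresponding entry
// loaded from the index file.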
TEST_F(SimpleIndexTest, AllInitConflicts) {
  base::Time now(base::Time::Now());

  index()->Remove(hashes_.at<1>());
  InsertIntoIndexFileReturn(hashes_.at<1>(), now - base::Days(2), 10u);
  index()->Insert(hashes_.at<2>());
  InsertIntoIndexFileReturn(hashes_.at<2>(), now - base::Days(3), 100u);
  index()->Insert(hashes_.at<3>());
  index()->Remove(hashes_.at<3>());
  InsertIntoIndexFileReturn(hashes_.at<3>(), now - base::Days(4), 1000u);
  index()->Remove(hashes_.at<4>());
  index()->Insert(hashes_.at<4>());
  InsertIntoIndexFileReturn(hashes_.at<4>(), now - base::Days(5), 10000u);
  InsertIntoIndexFileReturn(hashes_.at<5>(), now - base::Days(6), 100000u);

  ReturnIndexFile();

  EXPECT_FALSE(index()->Has(hashes_.at<1>()));

  EntryMetadata metadata;
  EXPECT_TRUE(GetEntryForTesting(hashes_.at<2>(), &metadata));
  EXPECT_LT(now - base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_GT(now + base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_EQ(0U, metadata.GetEntrySize());

  EXPECT_FALSE(index()->Has(hashes_.at<3>()));

  EXPECT_TRUE(GetEntryForTesting(hashes_.at<4>(), &metadata));
  EXPECT_LT(now - base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_GT(now + base::Minutes(1), metadata.GetLastUsedTime());
  EXPECT_EQ(0U, metadata.GetEntrySize());

  EXPECT_TRUE(GetEntryForTesting(hashes_.at<5>(), &metadata));
  EXPECT_GT(now - base::Days(6) + base::Seconds(1), metadata.GetLastUsedTime());
  EXPECT_LT(now - base::Days(6) - base::Seconds(1), metadata.GetLastUsedTime());
  EXPECT_EQ(RoundSize(100000u), metadata.GetEntrySize());
}

TEST_F(SimpleIndexTest, BasicEviction) {
  base::Time now(base::Time::Now());
  index()->SetMaxSize(1000);
  InsertIntoIndexFileReturn(hashes_.at<1>(), now - base::Days(2), 475u);
  index()->Insert(hashes_.at<2>());
  index()->UpdateEntrySize(hashes_.at<2>(), 475u);
  ReturnIndexFile();

  WaitForTimeChange();

  index()->Insert(hashes_.at<3>());
  // Confirm index is as expected: No eviction, everything there.
  EXPECT_EQ(3, index()->GetEntryCount());
  EXPECT_EQ(0, doom_entries_calls());
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));

  // Trigger an eviction, and make sure the right things are tossed.
  // TODO(morlovich): This is dependent on the innards of the implementation
  // as to at exactly what point we trigger eviction. Not sure how to fix
  // that.
  index()->UpdateEntrySize(hashes_.at<3>(), 475u);
  EXPECT_EQ(1, doom_entries_calls());
  EXPECT_EQ(1, index()->GetEntryCount());
  EXPECT_FALSE(index()->Has(hashes_.at<1>()));
  EXPECT_FALSE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));
  ASSERT_EQ(2u, last_doom_entry_hashes().size());
}

TEST_F(SimpleIndexTest, EvictBySize) {
  base::Time now(base::Time::Now());
  index()->SetMaxSize(50000);
  InsertIntoIndexFileReturn(hashes_.at<1>(), now - base::Days(2), 475u);
  InsertIntoIndexFileReturn(hashes_.at<2>(), now - base::Days(1), 40000u);
  ReturnIndexFile();
  WaitForTimeChange();

  index()->Insert(hashes_.at<3>());
  // Confirm index is as expected: No eviction, everything there.
  EXPECT_EQ(3, index()->GetEntryCount());
  EXPECT_EQ(0, doom_entries_calls());
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));

  // Trigger an eviction, and make sure the right things are tossed.
  // TODO(morlovich): This is dependent on the innards of the implementation
  // as to at exactly what point we trigger eviction. Not sure how to fix
  // that.
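  // Once entry 3 grows to ~40000 bytes, the rounded sizes (512 + 40192 +
  // 40192 bytes) exceed the 50000-byte limit, so eviction must run; size-aware
  // eviction is expected to doom the large (though newer) entry 2 rather than
  // the older but small entry 1.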
  index()->UpdateEntrySize(hashes_.at<3>(), 40000u);
  EXPECT_EQ(1, doom_entries_calls());
  EXPECT_EQ(2, index()->GetEntryCount());
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  EXPECT_FALSE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));
  ASSERT_EQ(1u, last_doom_entry_hashes().size());
}

TEST_F(SimpleIndexCodeCacheTest, DisableEvictBySize) {
  base::Time now(base::Time::Now());
  index()->SetMaxSize(50000);
  InsertIntoIndexFileReturn(hashes_.at<1>(), now - base::Days(2), 475u);
  InsertIntoIndexFileReturn(hashes_.at<2>(), now - base::Days(1), 40000u);
  ReturnIndexFile();
  WaitForTimeChange();

  index()->Insert(hashes_.at<3>());
  // Confirm index is as expected: No eviction, everything there.
  EXPECT_EQ(3, index()->GetEntryCount());
  EXPECT_EQ(0, doom_entries_calls());
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));

  // Trigger an eviction, and make sure the right things are tossed.
  // Since evict by size is supposed to be disabled, it evicts in LRU order,
  // so entries 1 and 2 are both kicked out.
  index()->UpdateEntrySize(hashes_.at<3>(), 40000u);
  EXPECT_EQ(1, doom_entries_calls());
  EXPECT_EQ(1, index()->GetEntryCount());
  EXPECT_FALSE(index()->Has(hashes_.at<1>()));
  EXPECT_FALSE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));
  ASSERT_EQ(2u, last_doom_entry_hashes().size());
}

// Same as the test above, but using much older entries to make sure that
// small things eventually get evicted.
TEST_F(SimpleIndexTest, EvictBySize2) {
  base::Time now(base::Time::Now());
  index()->SetMaxSize(50000);
  InsertIntoIndexFileReturn(hashes_.at<1>(), now - base::Days(200), 475u);
  InsertIntoIndexFileReturn(hashes_.at<2>(), now - base::Days(1), 40000u);
  ReturnIndexFile();
  WaitForTimeChange();

  index()->Insert(hashes_.at<3>());
  // Confirm index is as expected: No eviction, everything there.
  EXPECT_EQ(3, index()->GetEntryCount());
  EXPECT_EQ(0, doom_entries_calls());
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));

  // Trigger an eviction, and make sure the right things are tossed.
  // TODO(morlovich): This is dependent on the innards of the implementation
  // as to at exactly what point we trigger eviction. Not sure how to fix
  // that.
  index()->UpdateEntrySize(hashes_.at<3>(), 40000u);
  EXPECT_EQ(1, doom_entries_calls());
  EXPECT_EQ(1, index()->GetEntryCount());
  EXPECT_FALSE(index()->Has(hashes_.at<1>()));
  EXPECT_FALSE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));
  ASSERT_EQ(2u, last_doom_entry_hashes().size());
}

class SimpleIndexPrioritizedCachingTest : public SimpleIndexTest {
 public:
  SimpleIndexPrioritizedCachingTest() {
    feature_list_.InitAndEnableFeature(
        net::features::kSimpleCachePrioritizedCaching);
  }
  ~SimpleIndexPrioritizedCachingTest() override = default;

 private:
  base::test::ScopedFeatureList feature_list_;
};

TEST_F(SimpleIndexPrioritizedCachingTest, EvictPrioritization) {
  const auto caching_prioritization_period =
      net::features::kSimpleCachePrioritizedCachingPrioritizationPeriod.Get();
  auto now = base::Time::Now();
  index()->SetMaxSize(50000);
  InsertIntoIndexFileWithPrioritizeCachingFlagReturn(
      hashes_.at<1>(), now - caching_prioritization_period * 0.8, 20000u);
  InsertIntoIndexFileReturn(hashes_.at<2>(),
                            now - caching_prioritization_period * 0.4, 20000u);
  ReturnIndexFile();
  WaitForTimeChange();

  index()->Insert(hashes_.at<3>());
  // Confirm index is as expected: No eviction, everything there.
  EXPECT_EQ(3, index()->GetEntryCount());
  EXPECT_EQ(0, doom_entries_calls());
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));

  // Trigger an eviction, and make sure the right things are tossed.
  index()->UpdateEntrySize(hashes_.at<3>(), 20000u);
  EXPECT_EQ(1, doom_entries_calls());
  EXPECT_EQ(2, index()->GetEntryCount());
  // The entry with the priority flag is kept, even if it's older.
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  // The entry without the priority flag is evicted, even if it's newer.
  EXPECT_FALSE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));
  ASSERT_EQ(1u, last_doom_entry_hashes().size());
}

TEST_F(SimpleIndexPrioritizedCachingTest, EvictPrioritizationOutOfPeriod) {
  const auto caching_prioritization_period =
      net::features::kSimpleCachePrioritizedCachingPrioritizationPeriod.Get();
  auto now = base::Time::Now();
  index()->SetMaxSize(50000);
  InsertIntoIndexFileWithPrioritizeCachingFlagReturn(
      hashes_.at<1>(), now - caching_prioritization_period * 2, 20000u);
  InsertIntoIndexFileReturn(hashes_.at<2>(),
                            now - caching_prioritization_period, 20000u);
  ReturnIndexFile();
  WaitForTimeChange();

  index()->Insert(hashes_.at<3>());
  // Confirm index is as expected: No eviction, everything there.
  EXPECT_EQ(3, index()->GetEntryCount());
  EXPECT_EQ(0, doom_entries_calls());
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));

  // Trigger an eviction, and make sure the right things are tossed.
  index()->UpdateEntrySize(hashes_.at<3>(), 20000u);
  EXPECT_EQ(1, doom_entries_calls());
  EXPECT_EQ(2, index()->GetEntryCount());
  // The older entry is evicted, even if it has the priority flag, when the
  // entry is out of the prioritization period.
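  // (Entry 1 was last used two prioritization periods ago, so the
  // HINT_HIGH_PRIORITY flag no longer protects it and LRU order applies.)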
  EXPECT_FALSE(index()->Has(hashes_.at<1>()));
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));
  ASSERT_EQ(1u, last_doom_entry_hashes().size());
}

TEST_F(SimpleIndexTest, EvictPrioritizationFeatureDefaultDisabled) {
  const auto caching_prioritization_period =
      net::features::kSimpleCachePrioritizedCachingPrioritizationPeriod.Get();
  auto now = base::Time::Now();
  index()->SetMaxSize(50000);
  InsertIntoIndexFileWithPrioritizeCachingFlagReturn(
      hashes_.at<1>(), now - caching_prioritization_period * 0.8, 20000u);
  InsertIntoIndexFileReturn(hashes_.at<2>(),
                            now - caching_prioritization_period * 0.4, 20000u);
  ReturnIndexFile();
  WaitForTimeChange();

  index()->Insert(hashes_.at<3>());
  // Confirm index is as expected: No eviction, everything there.
  EXPECT_EQ(3, index()->GetEntryCount());
  EXPECT_EQ(0, doom_entries_calls());
  EXPECT_TRUE(index()->Has(hashes_.at<1>()));
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));

  // Trigger an eviction, and make sure the right things are tossed.
  index()->UpdateEntrySize(hashes_.at<3>(), 20000u);
  EXPECT_EQ(1, doom_entries_calls());
  EXPECT_EQ(2, index()->GetEntryCount());
  // The older entry is evicted, even if it has the priority flag, when the
  // feature is disabled.
  EXPECT_FALSE(index()->Has(hashes_.at<1>()));
  // The newer entry is kept, even if it doesn't have the priority flag, when
  // the feature is disabled.
  EXPECT_TRUE(index()->Has(hashes_.at<2>()));
  EXPECT_TRUE(index()->Has(hashes_.at<3>()));
  ASSERT_EQ(1u, last_doom_entry_hashes().size());
}

// Confirm all the operations queue a disk write at some point in the
// future.
TEST_F(SimpleIndexTest, DiskWriteQueued) {
  index()->SetMaxSize(1000);
  ReturnIndexFile();

  EXPECT_FALSE(index()->HasPendingWrite());

  const uint64_t kHash1 = hashes_.at<1>();
  index()->Insert(kHash1);
  EXPECT_TRUE(index()->HasPendingWrite());
  index()->write_to_disk_timer_.Stop();
  EXPECT_FALSE(index()->HasPendingWrite());

  // Attempting to insert a hash that already exists should not queue the
  // write timer.
  index()->Insert(kHash1);
  EXPECT_FALSE(index()->HasPendingWrite());

  index()->UseIfExists(kHash1);
  EXPECT_TRUE(index()->HasPendingWrite());
  index()->write_to_disk_timer_.Stop();

  index()->UpdateEntrySize(kHash1, 20u);
  EXPECT_TRUE(index()->HasPendingWrite());
  index()->write_to_disk_timer_.Stop();

  // Updating to the same size should not queue the write timer.
  index()->UpdateEntrySize(kHash1, 20u);
  EXPECT_FALSE(index()->HasPendingWrite());

  index()->Remove(kHash1);
  EXPECT_TRUE(index()->HasPendingWrite());
  index()->write_to_disk_timer_.Stop();

  // Removing a non-existent hash should not queue the write timer.
  index()->Remove(kHash1);
  EXPECT_FALSE(index()->HasPendingWrite());
}

TEST_F(SimpleIndexTest, DiskWriteExecuted) {
  index()->SetMaxSize(1000);
  ReturnIndexFile();

  EXPECT_FALSE(index()->HasPendingWrite());

  const uint64_t kHash1 = hashes_.at<1>();
  index()->Insert(kHash1);
  index()->UpdateEntrySize(kHash1, 20u);
  EXPECT_TRUE(index()->HasPendingWrite());

  EXPECT_EQ(0, index_file_->disk_writes());
  index()->write_to_disk_timer_.FireNow();
  EXPECT_EQ(1, index_file_->disk_writes());

  SimpleIndex::EntrySet entry_set;
  index_file_->GetAndResetDiskWriteEntrySet(&entry_set);

  uint64_t hash_key = kHash1;
  base::Time now(base::Time::Now());
  ASSERT_EQ(1u, entry_set.size());
  EXPECT_EQ(hash_key, entry_set.begin()->first);
  const EntryMetadata& entry1(entry_set.begin()->second);
  EXPECT_LT(now - base::Minutes(1), entry1.GetLastUsedTime());
  EXPECT_GT(now + base::Minutes(1), entry1.GetLastUsedTime());
  EXPECT_EQ(RoundSize(20u), entry1.GetEntrySize());
}

TEST_F(SimpleIndexTest, DiskWritePostponed) {
  index()->SetMaxSize(1000);
  ReturnIndexFile();

  EXPECT_FALSE(index()->HasPendingWrite());

  index()->Insert(hashes_.at<1>());
  index()->UpdateEntrySize(hashes_.at<1>(), 20u);
  EXPECT_TRUE(index()->HasPendingWrite());
  base::TimeTicks expected_trigger(
      index()->write_to_disk_timer_.desired_run_time());

  WaitForTimeChange();
  EXPECT_EQ(expected_trigger,
            index()->write_to_disk_timer_.desired_run_time());
  index()->Insert(hashes_.at<2>());
  index()->UpdateEntrySize(hashes_.at<2>(), 40u);
  EXPECT_TRUE(index()->HasPendingWrite());
  EXPECT_LT(expected_trigger,
            index()->write_to_disk_timer_.desired_run_time());
  index()->write_to_disk_timer_.Stop();
}

// net::APP_CACHE mode should not need to queue disk writes in as many places
// as the default net::DISK_CACHE mode.
TEST_F(SimpleIndexAppCacheTest, DiskWriteQueued) {
  index()->SetMaxSize(1000);
  ReturnIndexFile();

  EXPECT_FALSE(index()->HasPendingWrite());

  const uint64_t kHash1 = hashes_.at<1>();
  index()->Insert(kHash1);
  EXPECT_TRUE(index()->HasPendingWrite());
  index()->write_to_disk_timer_.Stop();
  EXPECT_FALSE(index()->HasPendingWrite());

  // Attempting to insert a hash that already exists should not queue the
  // write timer.
  index()->Insert(kHash1);
  EXPECT_FALSE(index()->HasPendingWrite());

  // Since net::APP_CACHE does not evict or track access times, using an
  // entry should not queue the write timer.
  index()->UseIfExists(kHash1);
  EXPECT_FALSE(index()->HasPendingWrite());

  index()->UpdateEntrySize(kHash1, 20u);
  EXPECT_TRUE(index()->HasPendingWrite());
  index()->write_to_disk_timer_.Stop();

  // Updating to the same size should not queue the write timer.
  index()->UpdateEntrySize(kHash1, 20u);
  EXPECT_FALSE(index()->HasPendingWrite());

  index()->Remove(kHash1);
  EXPECT_TRUE(index()->HasPendingWrite());
  index()->write_to_disk_timer_.Stop();

  // Removing a non-existent hash should not queue the write timer.
  index()->Remove(kHash1);
  EXPECT_FALSE(index()->HasPendingWrite());
}

}  // namespace disk_cache