1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <stdint.h>
6
7 #include <memory>
8
9 #include "base/containers/queue.h"
10 #include "base/files/file.h"
11 #include "base/files/file_util.h"
12 #include "base/functional/bind.h"
13 #include "base/functional/callback.h"
14 #include "base/functional/callback_helpers.h"
15 #include "base/memory/memory_pressure_listener.h"
16 #include "base/memory/raw_ptr.h"
17 #include "base/metrics/field_trial.h"
18 #include "base/ranges/algorithm.h"
19 #include "base/run_loop.h"
20 #include "base/strings/string_number_conversions.h"
21 #include "base/strings/string_split.h"
22 #include "base/strings/string_util.h"
23 #include "base/strings/stringprintf.h"
24 #include "base/task/sequenced_task_runner.h"
25 #include "base/task/single_thread_task_runner.h"
26 #include "base/task/thread_pool.h"
27 #include "base/test/bind.h"
28 #include "base/test/metrics/histogram_tester.h"
29 #include "base/test/scoped_feature_list.h"
30 #include "base/test/simple_test_clock.h"
31 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
32 #include "base/threading/platform_thread.h"
33 #include "base/threading/thread_restrictions.h"
34 #include "base/time/time.h"
35 #include "base/trace_event/memory_allocator_dump.h"
36 #include "base/trace_event/process_memory_dump.h"
37 #include "build/build_config.h"
38 #include "net/base/cache_type.h"
39 #include "net/base/completion_once_callback.h"
40 #include "net/base/io_buffer.h"
41 #include "net/base/net_errors.h"
42 #include "net/base/request_priority.h"
43 #include "net/base/test_completion_callback.h"
44 #include "net/base/tracing.h"
45 #include "net/disk_cache/backend_cleanup_tracker.h"
46 #include "net/disk_cache/blockfile/backend_impl.h"
47 #include "net/disk_cache/blockfile/entry_impl.h"
48 #include "net/disk_cache/blockfile/experiments.h"
49 #include "net/disk_cache/blockfile/mapped_file.h"
50 #include "net/disk_cache/cache_util.h"
51 #include "net/disk_cache/disk_cache_test_base.h"
52 #include "net/disk_cache/disk_cache_test_util.h"
53 #include "net/disk_cache/memory/mem_backend_impl.h"
54 #include "net/disk_cache/simple/simple_backend_impl.h"
55 #include "net/disk_cache/simple/simple_entry_format.h"
56 #include "net/disk_cache/simple/simple_histogram_enums.h"
57 #include "net/disk_cache/simple/simple_index.h"
58 #include "net/disk_cache/simple/simple_synchronous_entry.h"
59 #include "net/disk_cache/simple/simple_test_util.h"
60 #include "net/disk_cache/simple/simple_util.h"
61 #include "net/test/gtest_util.h"
62 #include "testing/gmock/include/gmock/gmock.h"
63 #include "testing/gtest/include/gtest/gtest.h"
64 #include "third_party/abseil-cpp/absl/types/optional.h"
65
66 using disk_cache::EntryResult;
67 using net::test::IsError;
68 using net::test::IsOk;
69 using testing::ByRef;
70 using testing::Contains;
71 using testing::Eq;
72 using testing::Field;
73
74 #if BUILDFLAG(IS_WIN)
75 #include "base/win/scoped_handle.h"
76
77 #include <windows.h>
78 #endif
79
80 // TODO(crbug.com/949811): Fix memory leaks in tests and re-enable on LSAN.
81 #ifdef LEAK_SANITIZER
82 #define MAYBE_BlockFileOpenOrCreateEntry DISABLED_BlockFileOpenOrCreateEntry
83 #define MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover \
84 DISABLED_NonEmptyCorruptSimpleCacheDoesNotRecover
85 #define MAYBE_SimpleOpenOrCreateEntry DISABLED_SimpleOpenOrCreateEntry
86 #else
87 #define MAYBE_BlockFileOpenOrCreateEntry BlockFileOpenOrCreateEntry
88 #define MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover \
89 NonEmptyCorruptSimpleCacheDoesNotRecover
90 #define MAYBE_SimpleOpenOrCreateEntry SimpleOpenOrCreateEntry
91 #endif
92
93 using base::Time;
94
95 namespace {
96
97 const char kExistingEntryKey[] = "existing entry key";
98
CreateExistingEntryCache(const base::FilePath & cache_path)99 std::unique_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
100 const base::FilePath& cache_path) {
101 net::TestCompletionCallback cb;
102
103 std::unique_ptr<disk_cache::BackendImpl> cache(
104 std::make_unique<disk_cache::BackendImpl>(cache_path,
105 /* cleanup_tracker = */ nullptr,
106 /* cache_thread = */ nullptr,
107 net::DISK_CACHE,
108 /* net_log = */ nullptr));
109 cache->Init(cb.callback());
110 if (cb.WaitForResult() != net::OK)
111 return nullptr;
112
113 TestEntryResultCompletionCallback cb2;
114 EntryResult result =
115 cache->CreateEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
116 result = cb2.GetResult(std::move(result));
117 if (result.net_error() != net::OK)
118 return nullptr;
119
120 return cache;
121 }
122
123 #if BUILDFLAG(IS_FUCHSIA)
124 // Load tests with large numbers of file descriptors perform poorly on
125 // virtualized test execution environments.
126 // TODO(807882): Remove this workaround when virtualized test performance
127 // improves.
128 const int kLargeNumEntries = 100;
129 #else
130 const int kLargeNumEntries = 512;
131 #endif
132
133 } // namespace
134
135 // Tests that can run with different types of caches.
class DiskCacheBackendTest : public DiskCacheTestWithCache {
 protected:
  // Some utility methods:

  // Perform IO operations on the cache until there is pending IO.
  int GeneratePendingIO(net::TestCompletionCallback* cb);

  // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
  // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
  // There are 4 entries after doomed_start and 2 after doomed_end.
  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);

  // Creates entries based on random keys, storing the keys in |key_pool|.
  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);

  // Iterates over the backend with |iter|, erasing each opened key from
  // |keys_to_match| and incrementing |count| per entry opened.
  bool EnumerateAndMatchKeys(int max_to_open,
                             TestIterator* iter,
                             std::set<std::string>* keys_to_match,
                             size_t* count);

  // Computes the expected size of entry metadata, i.e. the total size without
  // the actual data stored. This depends only on the entry's |key| size.
  int GetEntryMetadataSize(std::string key);

  // The Simple Backend only tracks the approximate sizes of entries. This
  // rounds the exact size appropriately.
  int GetRoundedSize(int exact_size);

  // Create a default key with the name provided, populate it with
  // CacheTestFillBuffer, and ensure this was done correctly.
  void CreateKeyAndCheck(disk_cache::Backend* cache, std::string key);

  // For the simple cache, wait until indexing has occurred and make sure it
  // completes successfully.
  void WaitForSimpleCacheIndexAndCheck(disk_cache::Backend* cache);

  // Run all of the task runners until idle; covers cache worker pools.
  void RunUntilIdle();

  // Actual tests:
  void BackendBasics();
  void BackendKeying();
  void BackendShutdownWithPendingFileIO(bool fast);
  void BackendShutdownWithPendingIO(bool fast);
  void BackendShutdownWithPendingCreate(bool fast);
  void BackendShutdownWithPendingDoom();
  void BackendSetSize();
  void BackendLoad();
  void BackendChain();
  void BackendValidEntry();
  void BackendInvalidEntry();
  void BackendInvalidEntryRead();
  void BackendInvalidEntryWithLoad();
  void BackendTrimInvalidEntry();
  void BackendTrimInvalidEntry2();
  void BackendEnumerations();
  void BackendEnumerations2();
  void BackendDoomMidEnumeration();
  void BackendInvalidEntryEnumeration();
  void BackendFixEnumerators();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendCalculateSizeOfAllEntries();
  void BackendCalculateSizeOfEntriesBetween(
      bool expect_access_time_range_comparisons);
  void BackendTransaction(const std::string& name, int num_entries, bool load);
  void BackendRecoverInsert();
  void BackendRecoverRemove();
  void BackendRecoverWithEviction();
  void BackendInvalidEntry2();
  void BackendInvalidEntry3();
  void BackendInvalidEntry7();
  void BackendInvalidEntry8();
  void BackendInvalidEntry9(bool eviction);
  void BackendInvalidEntry10(bool eviction);
  void BackendInvalidEntry11(bool eviction);
  void BackendTrimInvalidEntry12();
  void BackendDoomAll();
  void BackendDoomAll2();
  void BackendInvalidRankings();
  void BackendInvalidRankings2();
  void BackendDisable();
  void BackendDisable2();
  void BackendDisable3();
  void BackendDisable4();
  void BackendDisabledAPI();
  void BackendEviction();
  void BackendOpenOrCreateEntry();
  void BackendDeadOpenNextEntry();
  void BackendIteratorConcurrentDoom();
  void BackendValidateMigrated();
};
226
CreateKeyAndCheck(disk_cache::Backend * cache,std::string key)227 void DiskCacheBackendTest::CreateKeyAndCheck(disk_cache::Backend* cache,
228 std::string key) {
229 const int kBufSize = 4 * 1024;
230 auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
231 CacheTestFillBuffer(buffer->data(), kBufSize, true);
232 TestEntryResultCompletionCallback cb_entry;
233 disk_cache::EntryResult result =
234 cache->CreateEntry(key, net::HIGHEST, cb_entry.callback());
235 result = cb_entry.GetResult(std::move(result));
236 ASSERT_EQ(net::OK, result.net_error());
237 disk_cache::Entry* entry = result.ReleaseEntry();
238 EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
239 entry->Close();
240 RunUntilIdle();
241 }
242
WaitForSimpleCacheIndexAndCheck(disk_cache::Backend * cache)243 void DiskCacheBackendTest::WaitForSimpleCacheIndexAndCheck(
244 disk_cache::Backend* cache) {
245 net::TestCompletionCallback wait_for_index_cb;
246 static_cast<disk_cache::SimpleBackendImpl*>(cache)->index()->ExecuteWhenReady(
247 wait_for_index_cb.callback());
248 int rv = wait_for_index_cb.WaitForResult();
249 ASSERT_THAT(rv, IsOk());
250 RunUntilIdle();
251 }
252
// Drains the fixture's task runners, the current thread's run loop, and the
// dedicated blockfile cache thread, in that order.
void DiskCacheBackendTest::RunUntilIdle() {
  DiskCacheTestWithCache::RunUntilIdle();
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
}
258
// Writes to a freshly-created entry until a write returns ERR_IO_PENDING,
// which is the expected outcome; callers use this to exercise shutdown with
// file-level IO still in flight. Returns the last write result, ERR_FAILED
// on a short write, or ERR_CACHE_CREATE_FAILURE if entry creation fails.
int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
  // Only valid when IO is not bounced through another thread: either the
  // current thread is the cache thread, or the simple cache is in use.
  if (!use_current_thread_ && !simple_cache_mode_) {
    ADD_FAILURE();
    return net::ERR_FAILED;
  }

  TestEntryResultCompletionCallback create_cb;
  EntryResult entry_result;
  entry_result =
      cache_->CreateEntry("some key", net::HIGHEST, create_cb.callback());
  entry_result = create_cb.GetResult(std::move(entry_result));
  if (entry_result.net_error() != net::OK)
    return net::ERR_CACHE_CREATE_FAILURE;
  disk_cache::Entry* entry = entry_result.ReleaseEntry();

  const int kSize = 25000;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  int rv = net::OK;
  // Issue writes at 64 KiB strides (up to 10 MiB of offsets) until one of
  // them reports pending IO.
  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
    // We are using the current thread as the cache thread because we want to
    // be able to call directly this method to make sure that the OS (instead
    // of us switching thread) is returning IO pending.
    if (!simple_cache_mode_) {
      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
          0, i, buffer.get(), kSize, cb->callback(), false);
    } else {
      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
    }

    if (rv == net::ERR_IO_PENDING)
      break;
    if (rv != kSize)
      rv = net::ERR_FAILED;
  }

  // Don't call Close() to avoid going through the queue or we'll deadlock
  // waiting for the operation to finish.
  if (!simple_cache_mode_)
    static_cast<disk_cache::EntryImpl*>(entry)->Release();
  else
    entry->Close();

  return rv;
}
305
// Creates five sparse entries ("zeroth".."fourth"), each with data at offset
// 0 and past the 1 MiB boundary. |doomed_start| is captured after the zeroth
// entry and |doomed_end| after the second, so four entries follow the former
// and two follow the latter (see the declaration comment).
void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
                                           base::Time* doomed_end) {
  InitCache();

  const int kSize = 50;
  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
  const int kOffset = 10 + 1024 * 1024;

  disk_cache::Entry* entry0 = nullptr;
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_THAT(CreateEntry("zeroth", &entry0), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
  entry0->Close();

  // Flush and delay so the timestamp recorded below cleanly separates the
  // zeroth entry from the ones created next.
  FlushQueueForTest();
  AddDelay();
  if (doomed_start)
    *doomed_start = base::Time::Now();

  // Order in rankings list:
  // first_part1, first_part2, second_part1, second_part2
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
  entry1->Close();

  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
  entry2->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_end)
    *doomed_end = base::Time::Now();

  // Order in rankings list:
  // third_part1, fourth_part1, third_part2, fourth_part2
  disk_cache::Entry* entry3 = nullptr;
  disk_cache::Entry* entry4 = nullptr;
  ASSERT_THAT(CreateEntry("third", &entry3), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
  ASSERT_THAT(CreateEntry("fourth", &entry4), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
  entry3->Close();
  entry4->Close();

  FlushQueueForTest();
  AddDelay();
}
369
370 // Creates entries based on random keys. Stores these keys in |key_pool|.
CreateSetOfRandomEntries(std::set<std::string> * key_pool)371 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
372 std::set<std::string>* key_pool) {
373 const int kNumEntries = 10;
374 const int initial_entry_count = cache_->GetEntryCount();
375
376 for (int i = 0; i < kNumEntries; ++i) {
377 std::string key = GenerateKey(true);
378 disk_cache::Entry* entry;
379 if (CreateEntry(key, &entry) != net::OK) {
380 return false;
381 }
382 key_pool->insert(key);
383 entry->Close();
384 }
385 return key_pool->size() ==
386 static_cast<size_t>(cache_->GetEntryCount() - initial_entry_count);
387 }
388
389 // Performs iteration over the backend and checks that the keys of entries
390 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
391 // will be opened, if it is positive. Otherwise, iteration will continue until
392 // OpenNextEntry stops returning net::OK.
EnumerateAndMatchKeys(int max_to_open,TestIterator * iter,std::set<std::string> * keys_to_match,size_t * count)393 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
394 int max_to_open,
395 TestIterator* iter,
396 std::set<std::string>* keys_to_match,
397 size_t* count) {
398 disk_cache::Entry* entry;
399
400 if (!iter)
401 return false;
402 while (iter->OpenNextEntry(&entry) == net::OK) {
403 if (!entry)
404 return false;
405 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
406 entry->Close();
407 ++(*count);
408 if (max_to_open >= 0 && static_cast<int>(*count) >= max_to_open)
409 break;
410 };
411
412 return true;
413 }
414
GetEntryMetadataSize(std::string key)415 int DiskCacheBackendTest::GetEntryMetadataSize(std::string key) {
416 // For blockfile and memory backends, it is just the key size.
417 if (!simple_cache_mode_)
418 return key.size();
419
420 // For the simple cache, we must add the file header and EOF, and that for
421 // every stream.
422 return disk_cache::kSimpleEntryStreamCount *
423 (sizeof(disk_cache::SimpleFileHeader) +
424 sizeof(disk_cache::SimpleFileEOF) + key.size());
425 }
426
GetRoundedSize(int exact_size)427 int DiskCacheBackendTest::GetRoundedSize(int exact_size) {
428 if (!simple_cache_mode_)
429 return exact_size;
430
431 return (exact_size + 255) & 0xFFFFFF00;
432 }
433
// Exercises create/open/doom fundamentals: entries can be created once,
// reopened, counted via GetEntryCount(), and removed both through
// Backend::DoomEntry() and Entry::Doom().
void DiskCacheBackendTest::BackendBasics() {
  InitCache();
  disk_cache::Entry *entry1 = nullptr, *entry2 = nullptr;
  // Opening a key that was never created must fail.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_THAT(CreateEntry("the first key", &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  // Creating an already-existing key must fail; opening it must succeed.
  EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  ASSERT_THAT(CreateEntry("some other key", &entry2), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  ASSERT_TRUE(nullptr != entry2);
  EXPECT_EQ(2, cache_->GetEntryCount());

  // Opening the same key again must yield the same in-memory entry.
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(OpenEntry("some other key", &entry3), IsOk());
  ASSERT_TRUE(nullptr != entry3);
  EXPECT_TRUE(entry2 == entry3);

  // Dooming an open entry removes it from the count while handles are live.
  EXPECT_THAT(DoomEntry("some other key"), IsOk());
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry1->Close();
  entry2->Close();
  entry3->Close();

  EXPECT_THAT(DoomEntry("the first key"), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());

  ASSERT_THAT(CreateEntry("the first key", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("some other key", &entry2), IsOk());
  // Entry::Doom() with the handle still open, then Close().
  entry1->Doom();
  entry1->Close();
  EXPECT_THAT(DoomEntry("some other key"), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
  entry2->Close();
}
478
// Basic operations against the default blockfile cache.
TEST_F(DiskCacheBackendTest, Basics) {
  BackendBasics();
}
482
// Basic operations with the blockfile cache's "new eviction" mode enabled.
TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
  SetNewEviction();
  BackendBasics();
}
487
// Basic operations against the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
  SetMemoryOnlyMode();
  BackendBasics();
}
492
// Basic operations with the cache configured as an APP_CACHE.
TEST_F(DiskCacheBackendTest, AppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  BackendBasics();
}
497
// Basic operations with the cache configured as a SHADER_CACHE.
TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
  SetCacheType(net::SHADER_CACHE);
  BackendBasics();
}
502
// Exercises key handling: case sensitivity, key buffers at different
// alignments, long keys (block-file vs. external storage), and keys
// containing embedded null terminators.
void DiskCacheBackendTest::BackendKeying() {
  InitCache();
  const char kName1[] = "the first key";
  const char kName2[] = "the first Key";
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(kName1, &entry1), IsOk());

  // Keys differing only in case must map to distinct entries.
  ASSERT_THAT(CreateEntry(kName2, &entry2), IsOk());
  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
  entry2->Close();

  // The same key passed from buffers at different alignments must open the
  // same entry.
  char buffer[30];
  base::strlcpy(buffer, kName1, std::size(buffer));
  ASSERT_THAT(OpenEntry(buffer, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 1, kName1, std::size(buffer) - 1);
  ASSERT_THAT(OpenEntry(buffer + 1, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 3, kName1, std::size(buffer) - 3);
  ASSERT_THAT(OpenEntry(buffer + 3, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  // Now verify long keys.
  char buffer2[20000];
  memset(buffer2, 's', sizeof(buffer2));
  buffer2[1023] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
  entry2->Close();

  buffer2[1023] = 'g';
  buffer2[19999] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
  entry2->Close();
  entry1->Close();

  // Create entries with null terminator(s), and check equality. Note we create
  // the strings via the ctor instead of using literals because literals are
  // implicitly C strings which will stop at the first null terminator.
  std::string key1(4, '\0');
  key1[1] = 's';
  std::string key2(3, '\0');
  key2[1] = 's';
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  EXPECT_TRUE(entry1 != entry2) << "Different lengths";
  EXPECT_EQ(entry1->GetKey(), key1);
  EXPECT_EQ(entry2->GetKey(), key2);
  entry1->Close();
  entry2->Close();
}
558
// Key handling on the default blockfile cache.
TEST_F(DiskCacheBackendTest, Keying) {
  BackendKeying();
}
562
// Key handling with "new eviction" mode enabled.
TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
  SetNewEviction();
  BackendKeying();
}
567
// Key handling on the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
  SetMemoryOnlyMode();
  BackendKeying();
}
572
// Key handling with the cache configured as an APP_CACHE.
TEST_F(DiskCacheBackendTest, AppCacheKeying) {
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}
577
// Key handling with the cache configured as a SHADER_CACHE.
TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
  SetCacheType(net::SHADER_CACHE);
  BackendKeying();
}
582
// Creates backends via both the in-memory backend's private factory method
// and the public CreateCacheBackend() API, for disk and memory cache types.
TEST_F(DiskCacheTest, CreateBackend) {
  TestBackendResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    // Test the private factory method(s).
    std::unique_ptr<disk_cache::Backend> cache;
    cache = disk_cache::MemBackendImpl::CreateBackend(0, nullptr);
    ASSERT_TRUE(cache.get());
    cache.reset();

    // Now test the public API.

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT,
        /*file_operations=*/nullptr, cache_path_, 0,
        disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
    ASSERT_TRUE(rv.backend);
    rv.backend.reset();

    rv = disk_cache::CreateCacheBackend(
        net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT,
        /*file_operations=*/nullptr, base::FilePath(), 0,
        disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
    ASSERT_TRUE(rv.backend);
    rv.backend.reset();
  }

  // Let any tasks posted during backend destruction run to completion.
  base::RunLoop().RunUntilIdle();
}
618
// Verifies that the post-cleanup callback passed to CreateCacheBackend() for
// a memory backend fires only after the backend is destroyed, and only once
// the posted task gets to run.
TEST_F(DiskCacheTest, MemBackendPostCleanupCallback) {
  TestBackendResultCompletionCallback cb;

  net::TestClosure on_cleanup;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT,
      /*file_operations=*/nullptr, base::FilePath(), 0,
      disk_cache::ResetHandling::kNeverReset, nullptr, on_cleanup.closure(),
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
  // The callback should be posted after backend is destroyed.
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(on_cleanup.have_result());

  rv.backend.reset();

  // The callback is posted, not run synchronously, so it is still pending
  // until the run loop spins.
  EXPECT_FALSE(on_cleanup.have_result());
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(on_cleanup.have_result());
}
642
TEST_F(DiskCacheTest, CreateBackendDouble) {
  // Make sure that creation for the second backend for same path happens
  // after the first one completes.
  TestBackendResultCompletionCallback cb, cb2;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());

  disk_cache::BackendResult rv2 = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb2.callback());

  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  EXPECT_TRUE(rv.backend);
  // Flush the cache thread so cb2 would have had a chance to (incorrectly)
  // run early if sequencing were broken.
  disk_cache::FlushCacheThreadForTesting();

  // No rv2.backend yet.
  EXPECT_EQ(net::ERR_IO_PENDING, rv2.net_error);
  EXPECT_FALSE(rv2.backend);
  EXPECT_FALSE(cb2.have_result());

  rv.backend.reset();

  // Now rv2.backend should exist.
  rv2 = cb2.GetResult(std::move(rv2));
  EXPECT_THAT(rv2.net_error, IsOk());
  EXPECT_TRUE(rv2.backend);
}
675
TEST_F(DiskCacheBackendTest, CreateBackendDoubleOpenEntry) {
  // Demonstrate the creation sequencing with an open entry. This is done
  // with SimpleCache since the block-file cache cancels most of I/O on
  // destruction and blocks for what it can't cancel.

  // Don't try to sanity-check things as a blockfile cache
  SetSimpleCacheMode();

  // Make sure that creation for the second backend for same path happens
  // after the first one completes, and all of its ops complete.
  TestBackendResultCompletionCallback cb, cb2;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());

  disk_cache::BackendResult rv2 = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb2.callback());

  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
  disk_cache::FlushCacheThreadForTesting();

  // No cache 2 yet.
  EXPECT_EQ(net::ERR_IO_PENDING, rv2.net_error);
  EXPECT_FALSE(rv2.backend);
  EXPECT_FALSE(cb2.have_result());

  // Create an entry in the first backend and keep it open across the
  // backend's destruction.
  TestEntryResultCompletionCallback cb3;
  EntryResult entry_result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb3.callback());
  entry_result = cb3.GetResult(std::move(entry_result));
  ASSERT_EQ(net::OK, entry_result.net_error());

  rv.backend.reset();

  // Still doesn't exist: the open entry keeps the first backend's cleanup
  // from completing.
  EXPECT_FALSE(cb2.have_result());

  entry_result.ReleaseEntry()->Close();

  // Now should exist.
  rv2 = cb2.GetResult(std::move(rv2));
  EXPECT_THAT(rv2.net_error, IsOk());
  EXPECT_TRUE(rv2.backend);
}
726
TEST_F(DiskCacheBackendTest, CreateBackendPostCleanup) {
  // Test for the explicit PostCleanupCallback parameter to CreateCacheBackend.

  // Extravagant size payload to make reproducing races easier.
  const int kBufSize = 256 * 1024;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  CleanupCacheDir();

  base::RunLoop run_loop;
  TestBackendResultCompletionCallback cb;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, run_loop.QuitClosure(), cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  // Write a large payload into a fresh entry, then drop the backend.
  TestEntryResultCompletionCallback cb2;
  EntryResult result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  rv.backend.reset();

  // Wait till the post-cleanup callback.
  run_loop.Run();

  // All of the payload should be on disk, despite stream 0 being written
  // back in the async Close()
  base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex("key", 0));
  int64_t size = 0;
  EXPECT_TRUE(base::GetFileSize(entry_path, &size));
  EXPECT_GT(size, kBufSize);
}
771
TEST_F(DiskCacheBackendTest, SimpleCreateBackendRecoveryAppCache) {
  // Tests index recovery in APP_CACHE mode. (This is harder to test for
  // DISK_CACHE since post-cleanup callbacks aren't permitted there).
  const int kBufSize = 4 * 1024;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  base::RunLoop run_loop;
  TestBackendResultCompletionCallback cb;

  // Create a backend with post-cleanup callback specified, in order to know
  // when the index has been written back (so it can be deleted race-free).
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, run_loop.QuitClosure(), cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  // Create an entry.
  TestEntryResultCompletionCallback cb2;
  disk_cache::EntryResult result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  rv.backend.reset();

  // Wait till the post-cleanup callback.
  run_loop.Run();

  // Delete the index.
  base::DeleteFile(
      cache_path_.AppendASCII("index-dir").AppendASCII("the-real-index"));

  // Open the cache again. The fixture will also wait for index init.
  InitCache();

  // Entry should not have a trailer size, since can't tell what it should be
  // when doing recovery (and definitely shouldn't interpret last use time as
  // such).
  EXPECT_EQ(0, simple_cache_impl_->index()->GetTrailerPrefetchSize(
                   disk_cache::simple_util::GetEntryHashKey("key")));
}
825
826 // Tests that |BackendImpl| fails to initialize with a missing file.
TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  // Remove one of the cache's block files so Init() has to fail.
  base::FilePath filename = cache_path_.AppendASCII("data_1");
  base::DeleteFile(filename);
  net::TestCompletionCallback cb;

  // Blocking shouldn't be needed to create the cache.
  absl::optional<base::ScopedDisallowBlocking> disallow_blocking(
      absl::in_place);
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
                                                net::DISK_CACHE, nullptr));
  cache->Init(cb.callback());
  EXPECT_THAT(cb.WaitForResult(), IsError(net::ERR_FAILED));
  disallow_blocking.reset();

  cache.reset();
  // The on-disk cache is deliberately broken; skip the fixture's check.
  DisableIntegrityCheck();
}
846
// Verifies that the in-memory backend sheds entries in response to memory
// pressure notifications, shrinking further under critical pressure than
// under moderate pressure.
TEST_F(DiskCacheBackendTest, MemoryListensToMemoryPressure) {
  const int kLimit = 16 * 1024;
  const int kEntrySize = 256;
  SetMaxSize(kLimit);
  SetMemoryOnlyMode();
  InitCache();

  // Fill in to about 80-90% full.
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kEntrySize);
  CacheTestFillBuffer(buffer->data(), kEntrySize, false);

  for (int i = 0; i < 0.9 * (kLimit / kEntrySize); ++i) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_EQ(net::OK, CreateEntry(base::NumberToString(i), &entry));
    EXPECT_EQ(kEntrySize,
              WriteData(entry, 0, 0, buffer.get(), kEntrySize, true));
    entry->Close();
  }

  EXPECT_GT(CalculateSizeOfAllEntries(), 0.8 * kLimit);

  // Signal low-memory of various sorts, and see how small it gets.
  base::MemoryPressureListener::NotifyMemoryPressure(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
  base::RunLoop().RunUntilIdle();
  EXPECT_LT(CalculateSizeOfAllEntries(), 0.5 * kLimit);

  base::MemoryPressureListener::NotifyMemoryPressure(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
  base::RunLoop().RunUntilIdle();
  EXPECT_LT(CalculateSizeOfAllEntries(), 0.1 * kLimit);
}
879
TEST_F(DiskCacheBackendTest, ExternalFiles) {
  InitCache();
  // First, let's create a file on the folder.
  base::FilePath filename = cache_path_.AppendASCII("f_000001");

  const int kSize = 50;
  auto written = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(written->data(), kSize, false);
  ASSERT_TRUE(base::WriteFile(
      filename,
      base::StringPiece(written->data(), static_cast<size_t>(kSize))));

  // Now let's create a file with the cache.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 20000, written.get(), 0, false));
  entry->Close();

  // And verify that the first file is still there.
  auto read_back = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  ASSERT_EQ(kSize, base::ReadFile(filename, read_back->data(), kSize));
  EXPECT_EQ(0, memcmp(written->data(), read_back->data(), kSize));
}
903
// Tests that we deal with file-level pending operations at destruction time.
// |fast| skips kNoRandom, exercising the quicker shutdown path.
void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
  ASSERT_TRUE(CleanupCacheDir());
  uint32_t flags = disk_cache::kNoBuffering;
  if (!fast)
    flags |= disk_cache::kNoRandom;

  // Run the blockfile backend on this thread so the IO generated below is
  // still pending when the backend is torn down.
  if (!simple_cache_mode_)
    UseCurrentThread();
  CreateBackend(flags);

  net::TestCompletionCallback cb;
  int rv = GeneratePendingIO(&cb);

  // The cache destructor will see one pending operation here.
  ResetCaches();

  if (rv == net::ERR_IO_PENDING) {
    // Fast mode and the simple backend abandon the operation at shutdown;
    // the regular blockfile path has already delivered the result.
    if (fast || simple_cache_mode_)
      EXPECT_FALSE(cb.have_result());
    else
      EXPECT_TRUE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();

#if !BUILDFLAG(IS_IOS)
  // Wait for the actual operation to complete, or we'll keep a file handle that
  // may cause issues later. Note that on iOS systems even though this test
  // uses a single thread, the actual IO is posted to a worker thread and the
  // cache destructor breaks the link to reach cb when the operation completes.
  rv = cb.GetResult(rv);
#endif
}
938
// fast=false: regular shutdown, which drains the pending file IO.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
  BackendShutdownWithPendingFileIO(false);
}
942
943 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
944 // builds because they contain a lot of intentional memory leaks.
945 #if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
// fast=true: shutdown abandons the pending operation instead of draining it.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingFileIO(true);
}
953 #endif
954
955 // See crbug.com/330074
956 #if !BUILDFLAG(IS_IOS)
// Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
  base::ScopedTempDir store;
  ASSERT_TRUE(store.CreateUniqueTempDir());

  // Create a second, independent backend in its own temporary directory.
  net::TestCompletionCallback cb;
  TestBackendResultCompletionCallback create_cb;
  disk_cache::BackendResult backend_rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      store.GetPath(), 0, disk_cache::ResetHandling::kNeverReset,
      /* net_log = */ nullptr, create_cb.callback());
  backend_rv = create_cb.GetResult(std::move(backend_rv));
  ASSERT_THAT(backend_rv.net_error, IsOk());
  ASSERT_TRUE(backend_rv.backend);

  ASSERT_TRUE(CleanupCacheDir());
  SetNewEviction();  // Match the expected behavior for integrity verification.
  UseCurrentThread();

  CreateBackend(disk_cache::kNoBuffering);
  int rv = GeneratePendingIO(&cb);

  // cache_ has a pending operation, and backend_rv.backend will go away.
  backend_rv.backend.reset();

  // Destroying the other backend must not complete our pending operation.
  if (rv == net::ERR_IO_PENDING)
    EXPECT_FALSE(cb.have_result());

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle that
  // may cause issues later.
  rv = cb.GetResult(rv);
}
992 #endif
993
// Tests that we deal with background-thread pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
  // |cb| outlives the backend so we can check that no stray completion is
  // delivered after shutdown.
  TestEntryResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    uint32_t flags = disk_cache::kNoBuffering;
    if (!fast)
      flags |= disk_cache::kNoRandom;

    CreateBackend(flags);

    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb.callback());
    result = cb.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());

    result.ReleaseEntry()->Close();

    // The cache destructor will see one pending operation here.
    ResetCaches();
  }

  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}
1021
// fast=false: regular shutdown with a background operation outstanding.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
  BackendShutdownWithPendingIO(false);
}
1025
1026 #if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
// fast=true: shutdown does not wait for the background operation.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingIO(true);
}
1034 #endif
1035
// Tests that we deal with create-type pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
  // |cb| outlives the backend; the create below must never complete.
  TestEntryResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    disk_cache::BackendFlags flags =
        fast ? disk_cache::kNone : disk_cache::kNoRandom;
    CreateBackend(flags);

    // Kick off a create and tear the backend down while it is still pending.
    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb.callback());
    ASSERT_THAT(result.net_error(), IsError(net::ERR_IO_PENDING));

    ResetCaches();
    EXPECT_FALSE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}
1058
// fast=false: regular shutdown with a create operation outstanding.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
  BackendShutdownWithPendingCreate(false);
}
1062
1063 #if !defined(LEAK_SANITIZER)
// We'll be leaking an entry from this test.
// fast=true: shutdown abandons the pending create.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingCreate(true);
}
1071 #endif
1072
// Tests that a doom operation still pending at backend destruction never
// reports a result.
void DiskCacheBackendTest::BackendShutdownWithPendingDoom() {
  net::TestCompletionCallback cb;
  {
    ASSERT_TRUE(CleanupCacheDir());

    disk_cache::BackendFlags flags = disk_cache::kNoRandom;
    CreateBackend(flags);

    // Create (and close) an entry so there is something to doom.
    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();

    // Tear the backend down while the doom is still pending.
    int rv = cache_->DoomEntry("some key", net::HIGHEST, cb.callback());
    ASSERT_THAT(rv, IsError(net::ERR_IO_PENDING));

    ResetCaches();
    EXPECT_FALSE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}
1098
// Default-configuration driver for BackendShutdownWithPendingDoom().
TEST_F(DiskCacheBackendTest, ShutdownWithPendingDoom) {
  BackendShutdownWithPendingDoom();
}
1102
1103 // Disabled on android since this test requires cache creator to create
1104 // blockfile caches.
1105 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheTest, TruncatedIndex) {
  ASSERT_TRUE(CleanupCacheDir());
  // Plant a bogus, truncated index file where the backend expects one.
  base::FilePath index_path = cache_path_.AppendASCII("index");
  ASSERT_TRUE(base::WriteFile(index_path, "hello"));

  TestBackendResultCompletionCallback create_cb;

  // Backend creation must fail rather than use the corrupt index.
  disk_cache::BackendResult result = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kNeverReset, /*net_log=*/nullptr,
      create_cb.callback());
  result = create_cb.GetResult(std::move(result));
  ASSERT_NE(net::OK, result.net_error);
  ASSERT_FALSE(result.backend);
}
1122 #endif
1123
// Verifies that SetMaxSize() is honored: writes above the per-file limit
// fail, raising the limit admits them, and shrinking the limit triggers
// eviction of the oldest entry on the next trim.
void DiskCacheBackendTest::BackendSetSize() {
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(cache_size);
  memset(buffer->data(), 0, cache_size);
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
      << "normal file";

  EXPECT_EQ(net::ERR_FAILED,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));

  // Let's fill up the cache.
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4,
            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
  entry->Close();
  FlushQueueForTest();

  // Shrink the limit back down; the existing data now exceeds it.
  SetMaxSize(cache_size);

  // The cache is 95% full.

  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry("an extra key", &entry2), IsOk());
  EXPECT_EQ(cache_size / 10,
            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
  entry2->Close();  // This will trigger the cache trim.

  // The oldest entry ("some key") should have been evicted.
  EXPECT_NE(net::OK, OpenEntry(first, &entry2));

  FlushQueueForTest();  // Make sure that we are done trimming the cache.
  FlushQueueForTest();  // We may have posted two tasks to evict stuff.

  entry->Close();
  ASSERT_THAT(OpenEntry(second, &entry), IsOk());
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
  entry->Close();
}
1180
// Blockfile backend, default eviction.
TEST_F(DiskCacheBackendTest, SetSize) {
  BackendSetSize();
}
1184
// Same scenario with the new eviction algorithm enabled.
TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
  SetNewEviction();
  BackendSetSize();
}
1189
// Same scenario against the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
  SetMemoryOnlyMode();
  BackendSetSize();
}
1194
BackendLoad()1195 void DiskCacheBackendTest::BackendLoad() {
1196 InitCache();
1197 int seed = static_cast<int>(Time::Now().ToInternalValue());
1198 srand(seed);
1199
1200 disk_cache::Entry* entries[kLargeNumEntries];
1201 for (auto*& entry : entries) {
1202 std::string key = GenerateKey(true);
1203 ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1204 }
1205 EXPECT_EQ(kLargeNumEntries, cache_->GetEntryCount());
1206
1207 for (int i = 0; i < kLargeNumEntries; i++) {
1208 int source1 = rand() % kLargeNumEntries;
1209 int source2 = rand() % kLargeNumEntries;
1210 disk_cache::Entry* temp = entries[source1];
1211 entries[source1] = entries[source2];
1212 entries[source2] = temp;
1213 }
1214
1215 for (auto* entry : entries) {
1216 disk_cache::Entry* new_entry;
1217 ASSERT_THAT(OpenEntry(entry->GetKey(), &new_entry), IsOk());
1218 EXPECT_TRUE(new_entry == entry);
1219 new_entry->Close();
1220 entry->Doom();
1221 entry->Close();
1222 }
1223 FlushQueueForTest();
1224 EXPECT_EQ(0, cache_->GetEntryCount());
1225 }
1226
// Load test with heavy hash collisions (tiny table), default eviction.
TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1233
// Same load test with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  SetNewEviction();
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1241
// Same load test against the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  BackendLoad();
}
1247
// Same load test with the APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1255
// Same load test with the SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1263
1264 // Tests the chaining of an entry to the current head.
BackendChain()1265 void DiskCacheBackendTest::BackendChain() {
1266 SetMask(0x1); // 2-entry table.
1267 SetMaxSize(0x3000); // 12 kB.
1268 InitCache();
1269
1270 disk_cache::Entry* entry;
1271 ASSERT_THAT(CreateEntry("The first key", &entry), IsOk());
1272 entry->Close();
1273 ASSERT_THAT(CreateEntry("The Second key", &entry), IsOk());
1274 entry->Close();
1275 }
1276
// Default eviction.
TEST_F(DiskCacheBackendTest, Chain) {
  BackendChain();
}
1280
// New eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionChain) {
  SetNewEviction();
  BackendChain();
}
1285
// APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, AppCacheChain) {
  SetCacheType(net::APP_CACHE);
  BackendChain();
}
1290
// SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  SetCacheType(net::SHADER_CACHE);
  BackendChain();
}
1295
// Verifies that with new eviction, trims come first from the low-use list
// (list 1) and then from the recently-created list (list 0).
TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
  SetNewEviction();
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_THAT(CreateEntry(name, &entry), IsOk());
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_THAT(OpenEntry(name, &entry), IsOk());
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_THAT(OpenEntry("Key 1", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry("Key 91", &entry), IsOk());
  entry->Close();
}
1325
1326 // Before looking for invalid entries, let's check a valid entry.
BackendValidEntry()1327 void DiskCacheBackendTest::BackendValidEntry() {
1328 InitCache();
1329
1330 std::string key("Some key");
1331 disk_cache::Entry* entry;
1332 ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1333
1334 const int kSize = 50;
1335 auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
1336 memset(buffer1->data(), 0, kSize);
1337 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1338 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
1339 entry->Close();
1340 SimulateCrash();
1341
1342 ASSERT_THAT(OpenEntry(key, &entry), IsOk());
1343
1344 auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
1345 memset(buffer2->data(), 0, kSize);
1346 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
1347 entry->Close();
1348 EXPECT_STREQ(buffer1->data(), buffer2->data());
1349 }
1350
// Default eviction.
TEST_F(DiskCacheBackendTest, ValidEntry) {
  BackendValidEntry();
}
1354
// New eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
  SetNewEviction();
  BackendValidEntry();
}
1359
1360 // The same logic of the previous test (ValidEntry), but this time force the
1361 // entry to be invalid, simulating a crash in the middle.
1362 // We'll be leaking memory from this test.
BackendInvalidEntry()1363 void DiskCacheBackendTest::BackendInvalidEntry() {
1364 InitCache();
1365
1366 std::string key("Some key");
1367 disk_cache::Entry* entry;
1368 ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1369
1370 const int kSize = 50;
1371 auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
1372 memset(buffer->data(), 0, kSize);
1373 base::strlcpy(buffer->data(), "And the data to save", kSize);
1374 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1375 SimulateCrash();
1376
1377 EXPECT_NE(net::OK, OpenEntry(key, &entry));
1378 EXPECT_EQ(0, cache_->GetEntryCount());
1379 }
1380
1381 #if !defined(LEAK_SANITIZER)
// We'll be leaking memory from this test.
// Default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  BackendInvalidEntry();
}
1386
// We'll be leaking memory from this test.
// New eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  SetNewEviction();
  BackendInvalidEntry();
}
1392
// We'll be leaking memory from this test.
// APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();
}
1398
// We'll be leaking memory from this test.
// SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
}
1404
// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  // Reopen and read, leaving the entry open when the crash hits.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    EXPECT_EQ(1, cache_->GetEntryCount());
    entry->Close();
  } else {
    // Other cache types treat the open-at-crash entry as invalid.
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}
1435
// We'll be leaking memory from this test.
// Default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();
}
1440
// We'll be leaking memory from this test.
// New eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  SetNewEviction();
  BackendInvalidEntryRead();
}
1446
// We'll be leaking memory from this test.
// APP_CACHE: the helper expects the entry to survive in this mode.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}
1452
// We'll be leaking memory from this test.
// SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}
1458
// We'll be leaking memory from this test.
// Creates many entries, closes half of them, simulates a crash, and checks
// that only the cleanly closed half survives.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (auto*& entry : entries) {
    std::string key = GenerateKey(true);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Shuffle so the closed/open halves are a random sample.
  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    // Only the first half gets closed; the rest stay open across the crash.
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  // Entries that were open during the crash must be gone.
  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  // Cleanly closed entries must still open.
  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_THAT(OpenEntry(keys[i], &entry), IsOk());
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}
1507
// We'll be leaking memory from this test.
// Default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();
}
1512
// We'll be leaking memory from this test.
// New eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}
1518
// We'll be leaking memory from this test.
// APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}
1524
// We'll be leaking memory from this test.
// SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}
1530
// We'll be leaking memory from this test.
// Checks that an entry left open across a crash is the one evicted when the
// cache is later trimmed.
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.
  SimulateCrash();

  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());
  // Shrink the limit so closing the entry forces a trim.
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20 ms, we have one entry in the
  // cache; if it took more than that, we posted a task and we'll delete the
  // second entry too.
  base::RunLoop().RunUntilIdle();

  // This may be not thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  // The dirty (crashed) entry must be the one that went away.
  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}
1571
// We'll be leaking memory from this test.
// Default eviction.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  BackendTrimInvalidEntry();
}
1576
// We'll be leaking memory from this test.
// New eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  SetNewEviction();
  BackendTrimInvalidEntry();
}
1582
// We'll be leaking memory from this test.
// Like BackendTrimInvalidEntry, but with many chained dirty entries so that
// trimming has to skip over corrupt entries.
void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);
  InitCache();

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    // Note that we are not closing the entries.
  }

  // Simulate a crash.
  SimulateCrash();

  ASSERT_THAT(CreateEntry("Something else", &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache_->GetEntryCount());
  SetMaxSize(kSize);

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easy.
  if (new_eviction_) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
  }

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::RunLoop().RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();

  // For extra messiness, the integrity check for the cache can actually cause
  // evictions if it's over-capacity, which would race with above. So change the
  // size we pass to CheckCacheIntegrity (but don't mess with existing backend's
  // state).
  size_ = 0;
}
1639
// We'll be leaking memory from this test.
// Default eviction.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  BackendTrimInvalidEntry2();
}
1644
// We'll be leaking memory from this test.
// New eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  SetNewEviction();
  BackendTrimInvalidEntry2();
}
1650 #endif // !defined(LEAK_SANITIZER)
1651
BackendEnumerations()1652 void DiskCacheBackendTest::BackendEnumerations() {
1653 InitCache();
1654 Time initial = Time::Now();
1655
1656 const int kNumEntries = 100;
1657 for (int i = 0; i < kNumEntries; i++) {
1658 std::string key = GenerateKey(true);
1659 disk_cache::Entry* entry;
1660 ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1661 entry->Close();
1662 }
1663 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1664 Time final = Time::Now();
1665
1666 disk_cache::Entry* entry;
1667 std::unique_ptr<TestIterator> iter = CreateIterator();
1668 int count = 0;
1669 Time last_modified[kNumEntries];
1670 Time last_used[kNumEntries];
1671 while (iter->OpenNextEntry(&entry) == net::OK) {
1672 ASSERT_TRUE(nullptr != entry);
1673 if (count < kNumEntries) {
1674 last_modified[count] = entry->GetLastModified();
1675 last_used[count] = entry->GetLastUsed();
1676 EXPECT_TRUE(initial <= last_modified[count]);
1677 EXPECT_TRUE(final >= last_modified[count]);
1678 }
1679
1680 entry->Close();
1681 count++;
1682 };
1683 EXPECT_EQ(kNumEntries, count);
1684
1685 iter = CreateIterator();
1686 count = 0;
1687 // The previous enumeration should not have changed the timestamps.
1688 while (iter->OpenNextEntry(&entry) == net::OK) {
1689 ASSERT_TRUE(nullptr != entry);
1690 if (count < kNumEntries) {
1691 EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1692 EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1693 }
1694 entry->Close();
1695 count++;
1696 };
1697 EXPECT_EQ(kNumEntries, count);
1698 }
1699
// Default eviction.
TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
}
1703
// New eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  SetNewEviction();
  BackendEnumerations();
}
1708
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
}
1713
// SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
}
1718
// APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
}
1723
// Verifies enumerations while entries are open.
void DiskCacheBackendTest::BackendEnumerations2() {
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(first, &entry1), IsOk());
  entry1->Close();
  ASSERT_THAT(CreateEntry(second, &entry2), IsOk());
  entry2->Close();
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();
  // Opening "second" makes it the most recently used, so enumeration (MRU
  // first) returns it before "first".
  ASSERT_THAT(OpenEntry(second, &entry1), IsOk());
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".
  entry1->Close();
  entry2->Close();

  // The iterator should still be valid, so we should not crash.
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  EXPECT_EQ(entry2->GetKey(), first);
  entry2->Close();
  iter = CreateIterator();

  // Modify the oldest entry and get the newest element.
  ASSERT_THAT(OpenEntry(first, &entry1), IsOk());
  EXPECT_EQ(0, WriteData(entry1, 0, 200, nullptr, 0, false));
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
  } else {
    // The write moved "first" to the head of the list.
    EXPECT_EQ(entry2->GetKey(), first);
  }

  entry1->Close();
  entry2->Close();
}
1767
// Default blockfile configuration.
TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
}
1771
// "New eviction" blockfile configuration.
TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  SetNewEviction();
  BackendEnumerations2();
}
1776
// APP_CACHE exercises the "list is not updated" branch of the helper.
TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
}
1781
// SHADER_CACHE variant of the open-entries enumeration test.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
}
1786
BackendDoomMidEnumeration()1787 void DiskCacheBackendTest::BackendDoomMidEnumeration() {
1788 InitCache();
1789
1790 const int kNumEntries = 100;
1791 std::set<std::string> keys;
1792 for (int i = 0; i < kNumEntries; i++) {
1793 std::string key = GenerateKey(true);
1794 keys.insert(key);
1795 disk_cache::Entry* entry;
1796 ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1797 entry->Close();
1798 }
1799
1800 disk_cache::Entry* entry;
1801 std::unique_ptr<TestIterator> iter = CreateIterator();
1802 int count = 0;
1803 while (iter->OpenNextEntry(&entry) == net::OK) {
1804 if (count == 0) {
1805 // Delete a random entry from the cache while in the midst of iteration.
1806 auto key_to_doom = keys.begin();
1807 while (*key_to_doom == entry->GetKey())
1808 key_to_doom++;
1809 ASSERT_THAT(DoomEntry(*key_to_doom), IsOk());
1810 ASSERT_EQ(1u, keys.erase(*key_to_doom));
1811 }
1812 ASSERT_NE(nullptr, entry);
1813 EXPECT_EQ(1u, keys.erase(entry->GetKey()));
1814 entry->Close();
1815 count++;
1816 };
1817
1818 EXPECT_EQ(kNumEntries - 1, cache_->GetEntryCount());
1819 EXPECT_EQ(0u, keys.size());
1820 }
1821
// Default blockfile configuration.
TEST_F(DiskCacheBackendTest, DoomEnumerations) {
  BackendDoomMidEnumeration();
}
1825
// "New eviction" blockfile configuration.
TEST_F(DiskCacheBackendTest, NewEvictionDoomEnumerations) {
  SetNewEviction();
  BackendDoomMidEnumeration();
}
1830
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEnumerations) {
  SetMemoryOnlyMode();
  BackendDoomMidEnumeration();
}
1835
// SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheDoomEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomMidEnumeration();
}
1840
// APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, AppCacheDoomEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendDoomMidEnumeration();
}
1845
// Simple cache backend.
TEST_F(DiskCacheBackendTest, SimpleDoomEnumerations) {
  SetSimpleCacheMode();
  BackendDoomMidEnumeration();
}
1850
1851 // Verify that ReadData calls do not update the LRU cache
1852 // when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);

  ASSERT_THAT(CreateEntry(first, &entry1), IsOk());
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_THAT(CreateEntry(second, &entry2), IsOk());
  entry2->Close();

  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
  entry1->Close();

  // If the read above had updated the LRU, "first" would now be the most
  // recent entry; the iterator returning "second" proves reads do not
  // reorder the SHADER_CACHE list.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  EXPECT_EQ(entry2->GetKey(), second);
  entry2->Close();
}
1884
1885 #if !defined(LEAK_SANITIZER)
1886 // Verify handling of invalid entries while doing enumerations.
1887 // We'll be leaking memory from this test.
BackendInvalidEntryEnumeration()1888 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1889 InitCache();
1890
1891 std::string key("Some key");
1892 disk_cache::Entry *entry, *entry1, *entry2;
1893 ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
1894
1895 const int kSize = 50;
1896 auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
1897 memset(buffer1->data(), 0, kSize);
1898 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1899 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1900 entry1->Close();
1901 ASSERT_THAT(OpenEntry(key, &entry1), IsOk());
1902 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1903
1904 std::string key2("Another key");
1905 ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
1906 entry2->Close();
1907 ASSERT_EQ(2, cache_->GetEntryCount());
1908
1909 SimulateCrash();
1910
1911 std::unique_ptr<TestIterator> iter = CreateIterator();
1912 int count = 0;
1913 while (iter->OpenNextEntry(&entry) == net::OK) {
1914 ASSERT_TRUE(nullptr != entry);
1915 EXPECT_EQ(key2, entry->GetKey());
1916 entry->Close();
1917 count++;
1918 };
1919 EXPECT_EQ(1, count);
1920 EXPECT_EQ(1, cache_->GetEntryCount());
1921 }
1922
1923 // We'll be leaking memory from this test.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
}
1927
1928 // We'll be leaking memory from this test.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  SetNewEviction();
  BackendInvalidEntryEnumeration();
}
1933 #endif // !defined(LEAK_SANITIZER)
1934
1935 // Tests that if for some reason entries are modified close to existing cache
1936 // iterators, we don't generate fatal errors or reset the cache.
// Runs two concurrent iterators over the same cache, dooms an entry that one
// iterator is positioned near, and checks the other iterator keeps working
// without returning the doomed entry.
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  // Keys are pseudo-random; seed from the clock so runs differ.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter1 = CreateIterator(),
                                iter2 = CreateIterator();
  ASSERT_THAT(iter1->OpenNextEntry(&entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_THAT(iter1->OpenNextEntry(&entry1), IsOk());
    ASSERT_TRUE(nullptr != entry1);

    ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
    ASSERT_TRUE(nullptr != entry2);
    entry2->Close();
  }

  // Messing up with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);
  entry2->Close();
}
1987
// Default blockfile configuration.
TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}
1991
// "New eviction" blockfile configuration.
TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}
1996
// Creates two entries before and two after a |middle| timestamp and checks
// DoomEntriesSince(): dooming since a time after all entries removes nothing;
// dooming since |middle| removes exactly the two newer entries.
void DiskCacheBackendTest::BackendDoomRecent() {
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle = Time::Now();

  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  // |final| is after every entry, so nothing qualifies for removal.
  EXPECT_THAT(DoomEntriesSince(final), IsOk());
  ASSERT_EQ(4, cache_->GetEntryCount());

  // Only "third" and "fourth" were created after |middle|.
  EXPECT_THAT(DoomEntriesSince(middle), IsOk());
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("second", &entry), IsOk());
  entry->Close();
}
2029
// Default blockfile configuration.
TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}
2033
// "New eviction" blockfile configuration.
TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}
2038
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}
2043
// DoomEntriesSince() on an in-memory cache populated with sparse entries;
// only the entry created before |start| should survive.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, nullptr);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}
2051
// Blockfile counterpart of MemoryOnlyDoomEntriesSinceSparse.
TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, nullptr);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why the expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}
2061
// DoomAllEntries() must clear a sparse-populated in-memory cache completely.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(nullptr, nullptr);
  EXPECT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
}
2068
// Blockfile counterpart of MemoryOnlyDoomAllSparse.
TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(nullptr, nullptr);
  EXPECT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
}
2074
2075 // This test is for https://crbug.com/827492.
// Regression test: evicting closed sparse entries from a full in-memory
// cache must not crash (see https://crbug.com/827492 above).
TEST_F(DiskCacheBackendTest, InMemorySparseEvict) {
  const int kMaxSize = 512;

  SetMaxSize(kMaxSize);
  SetMemoryOnlyMode();
  InitCache();

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(64);
  CacheTestFillBuffer(buffer->data(), 64, false /* no_nulls */);

  std::vector<disk_cache::ScopedEntryPtr> entries;

  disk_cache::Entry* entry = nullptr;
  // Create a bunch of entries
  for (size_t i = 0; i < 14; i++) {
    std::string name = "http://www." + base::NumberToString(i) + ".com/";
    ASSERT_THAT(CreateEntry(name, &entry), IsOk());
    entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }

  // Create several sparse entries and fill with enough data to
  // pass eviction threshold.  The write at a far offset is expected to fail
  // because the cache is already at capacity.
  ASSERT_EQ(64, WriteSparseData(entries[0].get(), 0, buffer.get(), 64));
  ASSERT_EQ(net::ERR_FAILED,
            WriteSparseData(entries[0].get(), 10000, buffer.get(), 4));
  ASSERT_EQ(63, WriteSparseData(entries[1].get(), 0, buffer.get(), 63));
  ASSERT_EQ(64, WriteSparseData(entries[2].get(), 0, buffer.get(), 64));
  ASSERT_EQ(64, WriteSparseData(entries[3].get(), 0, buffer.get(), 64));

  // Close all the entries, leaving a populated LRU list
  // with all entries having refcount 0 (doom implies deletion)
  entries.clear();

  // Create a new entry, triggering buggy eviction
  ASSERT_THAT(CreateEntry("http://www.14.com/", &entry), IsOk());
  entry->Close();
}
2113
// Creates four entries separated by timestamps and checks DoomEntriesBetween()
// removes exactly the entries last used inside the given [start, end) window.
void DiskCacheBackendTest::BackendDoomBetween() {
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_start = Time::Now();

  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_end = Time::Now();

  // "fourth" is created and then re-opened, so its last-used time falls
  // after |middle_end|.
  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry("fourth", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  // Only "second" and "third" lie in [middle_start, middle_end).
  EXPECT_THAT(DoomEntriesBetween(middle_start, middle_end), IsOk());
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("fourth", &entry), IsOk());
  entry->Close();

  // Extending the window to |final| also removes "fourth".
  EXPECT_THAT(DoomEntriesBetween(middle_start, final), IsOk());
  ASSERT_EQ(1, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("first", &entry), IsOk());
  entry->Close();
}
2156
// Default blockfile configuration.
TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}
2160
// "New eviction" blockfile configuration.
TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}
2165
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}
2170
// DoomEntriesBetween() over an in-memory sparse cache: first a window that
// removes part of the entries, then a second window up to "now" that leaves
// only the earliest entry.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}
2183
// Blockfile counterpart of MemoryOnlyDoomEntriesBetweenSparse; counts differ
// because BackendImpl includes sparse child entries in GetEntryCount().
TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}
2195
BackendCalculateSizeOfAllEntries()2196 void DiskCacheBackendTest::BackendCalculateSizeOfAllEntries() {
2197 InitCache();
2198
2199 // The cache is initially empty.
2200 EXPECT_EQ(0, CalculateSizeOfAllEntries());
2201
2202 // Generate random entries and populate them with data of respective
2203 // sizes 0, 1, ..., count - 1 bytes.
2204 std::set<std::string> key_pool;
2205 CreateSetOfRandomEntries(&key_pool);
2206
2207 int count = 0;
2208 int total_size = 0;
2209 for (std::string key : key_pool) {
2210 std::string data(count, ' ');
2211 scoped_refptr<net::StringIOBuffer> buffer =
2212 base::MakeRefCounted<net::StringIOBuffer>(data);
2213
2214 // Alternate between writing to first two streams to test that we do not
2215 // take only one stream into account.
2216 disk_cache::Entry* entry;
2217 ASSERT_THAT(OpenEntry(key, &entry), IsOk());
2218 ASSERT_EQ(count, WriteData(entry, count % 2, 0, buffer.get(), count, true));
2219 entry->Close();
2220
2221 total_size += GetRoundedSize(count + GetEntryMetadataSize(key));
2222 ++count;
2223 }
2224
2225 int result = CalculateSizeOfAllEntries();
2226 EXPECT_EQ(total_size, result);
2227
2228 // Add another entry and test if the size is updated. Then remove it and test
2229 // if the size is back to original value.
2230 {
2231 const int last_entry_size = 47;
2232 std::string data(last_entry_size, ' ');
2233 scoped_refptr<net::StringIOBuffer> buffer =
2234 base::MakeRefCounted<net::StringIOBuffer>(data);
2235
2236 disk_cache::Entry* entry;
2237 std::string key = GenerateKey(true);
2238 ASSERT_THAT(CreateEntry(key, &entry), IsOk());
2239 ASSERT_EQ(last_entry_size,
2240 WriteData(entry, 0, 0, buffer.get(), last_entry_size, true));
2241 entry->Close();
2242
2243 int new_result = CalculateSizeOfAllEntries();
2244 EXPECT_EQ(
2245 result + GetRoundedSize(last_entry_size + GetEntryMetadataSize(key)),
2246 new_result);
2247
2248 DoomEntry(key);
2249 new_result = CalculateSizeOfAllEntries();
2250 EXPECT_EQ(result, new_result);
2251 }
2252
2253 // After dooming the entries, the size should be back to zero.
2254 ASSERT_THAT(DoomAllEntries(), IsOk());
2255 EXPECT_EQ(0, CalculateSizeOfAllEntries());
2256 }
2257
// Default blockfile configuration.
TEST_F(DiskCacheBackendTest, CalculateSizeOfAllEntries) {
  BackendCalculateSizeOfAllEntries();
}
2261
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyCalculateSizeOfAllEntries) {
  SetMemoryOnlyMode();
  BackendCalculateSizeOfAllEntries();
}
2266
TEST_F(DiskCacheBackendTest, SimpleCacheCalculateSizeOfAllEntries) {
  // Use net::APP_CACHE to make size estimations deterministic via
  // non-optimistic writes.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendCalculateSizeOfAllEntries();
}
2274
// Verifies CalculateSizeOfEntriesBetween() over three entries split around a
// |middle| timestamp. |expect_access_time_comparisons| is false for backends
// (e.g. SimpleCache in APP_CACHE mode) that cannot filter by access time, in
// which case only the unbounded [Time(), Time::Max()) queries are checked.
void DiskCacheBackendTest::BackendCalculateSizeOfEntriesBetween(
    bool expect_access_time_comparisons) {
  InitCache();

  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));

  Time start = Time::Now();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  base::RunLoop().RunUntilIdle();

  AddDelay();
  Time middle = Time::Now();
  AddDelay();

  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("third_entry", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  base::RunLoop().RunUntilIdle();

  AddDelay();
  Time end = Time::Now();

  // Entries hold no data, so each one's size is just its rounded metadata.
  int size_1 = GetRoundedSize(GetEntryMetadataSize("first"));
  int size_2 = GetRoundedSize(GetEntryMetadataSize("second"));
  int size_3 = GetRoundedSize(GetEntryMetadataSize("third_entry"));

  ASSERT_EQ(3, cache_->GetEntryCount());
  ASSERT_EQ(CalculateSizeOfAllEntries(),
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));

  if (expect_access_time_comparisons) {
    int start_end = CalculateSizeOfEntriesBetween(start, end);
    ASSERT_EQ(CalculateSizeOfAllEntries(), start_end);
    ASSERT_EQ(size_1 + size_2 + size_3, start_end);

    ASSERT_EQ(size_1, CalculateSizeOfEntriesBetween(start, middle));
    ASSERT_EQ(size_2 + size_3, CalculateSizeOfEntriesBetween(middle, end));
  }

  // After dooming the entries, the size should be back to zero.
  ASSERT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
}
2324
// The blockfile backend does not implement ranged size calculation at all.
TEST_F(DiskCacheBackendTest, CalculateSizeOfEntriesBetween) {
  InitCache();
  ASSERT_EQ(net::ERR_NOT_IMPLEMENTED,
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
}
2330
// The in-memory backend supports access-time range comparisons.
TEST_F(DiskCacheBackendTest, MemoryOnlyCalculateSizeOfEntriesBetween) {
  SetMemoryOnlyMode();
  BackendCalculateSizeOfEntriesBetween(true);
}
2335
TEST_F(DiskCacheBackendTest, SimpleCacheCalculateSizeOfEntriesBetween) {
  // Test normal mode in where access time range comparisons are supported.
  SetSimpleCacheMode();
  BackendCalculateSizeOfEntriesBetween(true);
}
2341
TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheCalculateSizeOfEntriesBetween) {
  // Test SimpleCache in APP_CACHE mode separately since it does not support
  // access time range comparisons.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendCalculateSizeOfEntriesBetween(false);
}
2349
// Opens a pre-built "golden" cache (checked in under |name|) that captures an
// interrupted transaction, and verifies the backend recovers it: the
// in-flight entry ("the first key") is gone, the entry count matches
// |num_entries| (allowing one extra casualty under |load|), and the resulting
// files pass the integrity check. Sets |success_| so callers can attribute a
// failure to the specific golden cache.
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries,
                                              bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32_t mask;
  if (load) {
    // Tiny 16-bucket table plus a 1 MB limit forces heavy hash-bucket load.
    mask = 0xf;
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  // The interrupted transaction still counts as one extra entry on open.
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  ResetCaches();

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, MaxSize(), mask));
  success_ = true;
}
2388
// Replays a series of golden caches, each captured mid-insert, to verify
// crash recovery of interrupted insert transactions.
void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";
}
2412
// Default blockfile configuration.
TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}
2416
// "New eviction" blockfile configuration.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}
2421
// Replays golden caches captured mid-removal, covering deletion of the only
// element, the list head, the list tail, and removals under heavy load.
void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
}
2461
2462 #if BUILDFLAG(IS_WIN)
2463 // http://crbug.com/396392
2464 #define MAYBE_RecoverRemove DISABLED_RecoverRemove
2465 #else
2466 #define MAYBE_RecoverRemove RecoverRemove
2467 #endif
// Default blockfile configuration (disabled on Windows, see MAYBE_ above).
TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
  BackendRecoverRemove();
}
2471
2472 #if BUILDFLAG(IS_WIN)
2473 // http://crbug.com/396392
2474 #define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
2475 #else
2476 #define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
2477 #endif
// "New eviction" configuration (disabled on Windows, see MAYBE_ above).
TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}
2482
// Opens a 100-entry golden cache with a max size small enough (4 kB) that
// recovery must evict while restarting; the only requirement is not crashing.
void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  // The teardown integrity check would fail after forced eviction.
  DisableIntegrityCheck();
}
2495
// Default blockfile configuration.
TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
}
2499
// "New eviction" blockfile configuration.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}
2504
2505 // Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_F(DiskCacheTest, WrongVersion) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  net::TestCompletionCallback cb;

  // make_unique is sufficient on its own; wrapping it in an explicit
  // std::unique_ptr declaration was redundant.
  auto cache = std::make_unique<disk_cache::BackendImpl>(
      cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
  cache->Init(cb.callback());
  // Init must fail rather than silently accept the mismatched version.
  ASSERT_THAT(cb.WaitForResult(), IsError(net::ERR_FAILED));
}
2516
2517 // Tests that the disk cache successfully joins the control group, dropping the
2518 // existing cache in favour of a new empty cache.
2519 // Disabled on android since this test requires cache creator to create
2520 // blockfile caches.
2521 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
  // Start with an existing blockfile cache containing one entry.
  std::unique_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_path_);
  ASSERT_TRUE(cache.get());
  cache.reset();

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");
  TestBackendResultCompletionCallback cb;
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  // Joining the control group drops the old cache: it must come up empty.
  EXPECT_EQ(0, rv.backend->GetEntryCount());
}
2542 #endif
2543
2544 // Tests that the disk cache can restart in the control group preserving
2545 // existing entries.
TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");

  std::unique_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_path_);
  ASSERT_TRUE(cache.get());

  net::TestCompletionCallback cb;

  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    cache = std::make_unique<disk_cache::BackendImpl>(
        cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
    cache->Init(cb.callback());
    ASSERT_THAT(cb.WaitForResult(), IsOk());
    EXPECT_EQ(1, cache->GetEntryCount());

    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache->OpenEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    // Check the open succeeded before dereferencing the entry; without this
    // a failed open would crash on a null entry instead of failing the test
    // (matches SimpleCacheControlLeave below).
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}
2573
2574 // Tests that the disk cache can leave the control group preserving existing
2575 // entries.
TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
  {
    // Instantiate the SimpleCacheTrial, forcing this run into the
    // ExperimentControl group.
    base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                           "ExperimentControl");

    std::unique_ptr<disk_cache::BackendImpl> cache =
        CreateExistingEntryCache(cache_path_);
    ASSERT_TRUE(cache.get());
  }

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentNo group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
  net::TestCompletionCallback cb;

  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    // make_unique is sufficient on its own; wrapping it in an explicit
    // std::unique_ptr declaration was redundant.
    auto cache = std::make_unique<disk_cache::BackendImpl>(
        cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
    cache->Init(cb.callback());
    ASSERT_THAT(cb.WaitForResult(), IsOk());
    // The entry created while in the control group must survive the switch.
    EXPECT_EQ(1, cache->GetEntryCount());

    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache->OpenEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}
2610
2611 // Tests that the cache is properly restarted on recovery error.
2612 // Disabled on android since this test requires cache creator to create
2613 // blockfile caches.
2614 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  SetNewEviction();

  TestBackendResultCompletionCallback cb;
  {
    // Blocking is disallowed to prove the reset happens on the cache thread,
    // not on this one.
    base::ScopedDisallowBlocking disallow_blocking;
    base::FilePath path(cache_path_);
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
        /*file_operations=*/nullptr, path, 0,
        disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
        cb.callback());
    path.clear();  // Make sure path was captured by the previous call.
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
  }
  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, /*max_size = */ 0,
                                  mask_));
}
2635 #endif
2636
2637 // We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidEntry2() {
  // Start from an on-disk image containing a known-bad entry.
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The healthy entry still opens; the corrupt one must be rejected rather
  // than crash the backend.
  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  entry1->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}
2651
// Run the shared InvalidEntry2 scenario under both eviction policies.
TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  SetNewEviction();
  BackendInvalidEntry2();
}
2660
2661 // Tests that we don't crash or hang when enumerating this cache.
void DiskCacheBackendTest::BackendInvalidEntry3() {
  // Callers copy a corrupted test cache before invoking this helper; the
  // small table/size force the corrupt entries to be visited.
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Enumerating the whole cache must terminate without crashing or hanging.
  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  while (iter->OpenNextEntry(&entry) == net::OK) {
    entry->Close();
  }
}
2674
// Same enumeration scenario against two different corrupted images and
// eviction policies.
TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  BackendInvalidEntry3();
  DisableIntegrityCheck();
}
2686
2687 // Test that we handle a dirty entry on the LRU list, already replaced with
2688 // the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Eviction must cope with the dirty duplicate entry without crashing.
  TrimForTest(false);
}
2698
2699 // Test that we handle a dirty entry on the deleted list, already replaced with
2700 // the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Trimming the deleted list must tolerate the dirty duplicate entry.
  TrimDeletedListForTest(false);
}
2711
TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}
2726
2727 // Tests that we don't hang when there is a loop on the hash collision list.
2728 // The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("The first key", &entry), IsOk());
  entry->Close();

  // Two eviction passes must terminate despite the self-referencing node.
  // The freshly created entry is expected to survive them.
  TrimForTest(false);
  TrimForTest(false);
  ASSERT_THAT(OpenEntry("The first key", &entry), IsOk());
  entry->Close();
  EXPECT_EQ(1, cache_->GetEntryCount());
}
2749
2750 // Tests that we don't hang when there is a loop on the hash collision list.
2751 // The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a wide loop of 5 entries.

  // Walking the collision list for a missing key must fail, not loop forever.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
}
2764
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_THAT(OpenEntry("the first key", &entry), IsOk());

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  // A third entry must remain reachable after the doom above.
  ASSERT_THAT(OpenEntry("some other key", &entry), IsOk());
  entry->Close();
}
2784
2785 // Tests handling of corrupt entries by keeping the rankings node around, with
2786 // a fatal failure.
void DiskCacheBackendTest::BackendInvalidEntry7() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Zeroing the rankings-node "next" pointer is treated as a fatal failure.
  entry_impl->rankings()->Data()->next = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should delete the cache. The list still has a corrupt node.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
2819
// Run the fatal rankings-corruption scenario under both eviction policies.
TEST_F(DiskCacheBackendTest, InvalidEntry7) {
  BackendInvalidEntry7();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
  SetNewEviction();
  BackendInvalidEntry7();
}
2828
2829 // Tests handling of corrupt entries by keeping the rankings node around, with
2830 // a non fatal failure.
void DiskCacheBackendTest::BackendInvalidEntry8() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Unlike InvalidEntry7, clobbering "contents" is a non-fatal failure: the
  // bad entry is dropped but the cache keeps running.
  entry_impl->rankings()->Data()->contents = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should not delete the cache.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  entry->Close();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  EXPECT_EQ(1, cache_->GetEntryCount());
}
2864
// Run the non-fatal rankings-corruption scenario under both eviction policies.
TEST_F(DiskCacheBackendTest, InvalidEntry8) {
  BackendInvalidEntry8();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
  SetNewEviction();
  BackendInvalidEntry8();
}
2873
// Tests handling of corrupt entries detected by enumerations. Note that these
// tests (xx9 to xx11) are basically just going through slightly different
// codepaths so they are tightly coupled with the code, but that is better than
// not testing error handling code.
// |eviction| selects whether the corruption is discovered by trimming
// (eviction path) or by iterating (enumeration path).
void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Stamp an invalid state value directly into the stored entry record.
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  if (eviction) {
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // Now a full iteration will work, and return one entry.
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // This should detect what's left of the bad entry.
    EXPECT_NE(net::OK, OpenEntry(second, &entry));
    EXPECT_EQ(2, cache_->GetEntryCount());
  }
  DisableIntegrityCheck();
}
2922
// Enumeration-path and eviction-path variants, each under both policies.
TEST_F(DiskCacheBackendTest, InvalidEntry9) {
  BackendInvalidEntry9(false);
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
  BackendInvalidEntry9(true);
}

TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(true);
}
2940
2941 // Tests handling of corrupt entries detected by enumerations.
// Like BackendInvalidEntry9, but with the corrupt entry on list 0 while a
// reused ("first") entry sits on list 1. Always uses the new eviction policy.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  // Re-open and write so "first" is promoted off the no-use list.
  ASSERT_THAT(OpenEntry(first, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    TrimForTest(false);
    EXPECT_EQ(3, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  }
  DisableIntegrityCheck();
}
2995
// Enumeration-path and eviction-path variants of InvalidEntry10.
TEST_F(DiskCacheBackendTest, InvalidEntry10) {
  BackendInvalidEntry10(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
  BackendInvalidEntry10(true);
}
3003
3004 // Tests handling of corrupt entries detected by enumerations.
// Like BackendInvalidEntry10, but here the corrupt entry also gets written to
// (promoted), so both "first" and the bad "second" end up on list 1.
void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry(first, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry(second, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third.
  // List 1: second (bad) -> first.

  if (eviction) {
    // Detection order: third -> first -> second.
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second.
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // Now a full iteration will work, and return two entries.
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  }
  DisableIntegrityCheck();
}
3066
// Enumeration-path and eviction-path variants of InvalidEntry11.
TEST_F(DiskCacheBackendTest, InvalidEntry11) {
  BackendInvalidEntry11(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
  BackendInvalidEntry11(true);
}
3074
3075 // Tests handling of corrupt entries in the middle of a long eviction run.
void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  // "fourth" is kept open during the trim, so only it should survive the
  // empty_cache (true) eviction run below.
  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
  TrimForTest(true);
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry->Close();
  DisableIntegrityCheck();
}
3103
// Run the long-eviction corruption scenario under both eviction policies.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
  BackendTrimInvalidEntry12();
}

TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
  SetNewEviction();
  BackendTrimInvalidEntry12();
}
3112
3113 // We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidRankings2() {
  // Start from an image with corrupted rankings data.
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The entry tied to the bad rankings node must fail to open; the other
  // entry stays usable.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_THAT(OpenEntry("some other key", &entry2), IsOk());
  entry2->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}
3127
// Run the bad-rankings open scenario under both eviction policies.
TEST_F(DiskCacheBackendTest, InvalidRankings2) {
  BackendInvalidRankings2();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
  SetNewEviction();
  BackendInvalidRankings2();
}
3136
3137 // If the LRU is corrupt, we delete the cache.
// Callers have already initialized the cache from a "bad_rankings" image.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  entry->Close();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // Hitting the corrupt LRU node fails the iteration and triggers a restart
  // that deletes the whole cache.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache_->GetEntryCount());
}
3149
// Four variants: {old, new} eviction x {successful, failing} reinit.
TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}
3181
3182 // If the LRU is corrupt and we have open entries, we disable the cache.
// Callers have already initialized the cache from a "bad_rankings" image.
void DiskCacheBackendTest::BackendDisable() {
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());

  // With entry1 still open, hitting the corruption disables the cache
  // (it cannot be restarted while entries are in use).
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));

  entry1->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}
3198
// Four variants: {old, new} eviction x {successful, failing} reinit.
TEST_F(DiskCacheBackendTest, DisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, DisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}
3230
3231 // This is another type of corruption on the LRU; disable the cache.
BackendDisable2()3232 void DiskCacheBackendTest::BackendDisable2() {
3233 EXPECT_EQ(8, cache_->GetEntryCount());
3234
3235 disk_cache::Entry* entry;
3236 std::unique_ptr<TestIterator> iter = CreateIterator();
3237 int count = 0;
3238 while (iter->OpenNextEntry(&entry) == net::OK) {
3239 ASSERT_TRUE(nullptr != entry);
3240 entry->Close();
3241 count++;
3242 ASSERT_LT(count, 9);
3243 };
3244
3245 FlushQueueForTest();
3246 EXPECT_EQ(0, cache_->GetEntryCount());
3247 }
3248
// Four variants: {old, new} eviction x {successful, failing} reinit.
TEST_F(DiskCacheBackendTest, DisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, DisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}
3280
3281 // If the index size changes when we disable the cache, we should not crash.
// Callers have already initialized the cache from a "bad_rankings2" image.
void DiskCacheBackendTest::BackendDisable3() {
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());
  entry1->Close();

  // Hitting the corruption restarts the cache; with no open entries the
  // restart succeeds and the backend keeps working afterwards.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  FlushQueueForTest();

  ASSERT_THAT(CreateEntry("Something new", &entry2), IsOk());
  entry2->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}
3297
// Index-size-change variants under both eviction policies.
TEST_F(DiskCacheBackendTest, DisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDisable3();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDisable3();
}
3314
3315 // If we disable the cache, already open entries should work as far as possible.
// Callers have already initialized the cache from a "bad_rankings" image.
void DiskCacheBackendTest::BackendDisable4() {
  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());

  // Create entries with a short (block-file) key and a very long (external
  // file) key so both storage paths are exercised while disabled.
  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  ASSERT_THAT(CreateEntry(key3, &entry3), IsOk());

  const int kBufSize = 20000;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
  EXPECT_EQ(0, cache_->GetEntryCount());

  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  // Already-open entries must still support reads, writes and key lookup
  // even though the backend is disabled.
  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}
3363
// Open-entries-while-disabled variants under both eviction policies.
TEST_F(DiskCacheBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable4();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable4();
}
3378
3379 // Tests the exposed API with a disabled cache.
// Callers have already initialized the cache from a "bad_rankings2" image.
void DiskCacheBackendTest::BackendDisabledAPI() {
  cache_impl_->SetUnitTestMode();  // Simulate failure restarting the cache.

  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());
  entry1->Close();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  FlushQueueForTest();
  // The cache should be disabled.

  // Every public operation must fail gracefully (no crash, no success).
  EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, OpenEntry("First", &entry2));
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
  EXPECT_NE(net::OK, DoomEntry("First"));
  EXPECT_NE(net::OK, DoomAllEntries());
  EXPECT_NE(net::OK, DoomEntriesBetween(Time(), Time::Now()));
  EXPECT_NE(net::OK, DoomEntriesSince(Time()));
  iter = CreateIterator();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));

  base::StringPairs stats;
  cache_->GetStats(&stats);
  EXPECT_TRUE(stats.empty());
  // Must be a no-op (it returns nothing to check) rather than a crash.
  OnExternalCacheHit("First");
}
3408
// Disabled-API variants under both eviction policies.
TEST_F(DiskCacheBackendTest, DisabledAPI) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  InitCache();
  BackendDisabledAPI();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisabledAPI) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisabledAPI();
}
3423
3424 // Test that some eviction of some kind happens.
void DiskCacheBackendTest::BackendEviction() {
  const int kMaxSize = 200 * 1024;
  const int kMaxEntryCount = 20;
  const int kWriteSize = kMaxSize / kMaxEntryCount;

  // Write twice as many entries as fit, forcing eviction to kick in.
  const int kWriteEntryCount = kMaxEntryCount * 2;

  static_assert(kWriteEntryCount * kWriteSize > kMaxSize,
                "must write more than MaxSize");

  SetMaxSize(kMaxSize);
  InitSparseCache(nullptr, nullptr);

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  std::string key_prefix("prefix");
  for (int i = 0; i < kWriteEntryCount; ++i) {
    // AddDelay() keeps entry timestamps distinct so LRU ordering is stable.
    AddDelay();
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    disk_cache::ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  // Something must have been evicted: total usage stays under the limit.
  int size = CalculateSizeOfAllEntries();
  EXPECT_GT(kMaxSize, size);
}
3455
// Run the eviction scenario against the blockfile and memory backends.
TEST_F(DiskCacheBackendTest, BackendEviction) {
  BackendEviction();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyBackendEviction) {
  SetMemoryOnlyMode();
  BackendEviction();
}
3464
3465 // TODO(morlovich): Enable BackendEviction test for simple cache after
3466 // performance problems are addressed. See crbug.com/588184 for more
3467 // information.
3468
3469 // This overly specific looking test is a regression test aimed at
3470 // crbug.com/589186.
TEST_F(DiskCacheBackendTest, MemoryOnlyUseAfterFree) {
  SetMemoryOnlyMode();

  const int kMaxSize = 200 * 1024;
  const int kMaxEntryCount = 20;
  const int kWriteSize = kMaxSize / kMaxEntryCount;

  SetMaxSize(kMaxSize);
  InitCache();

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  // Create an entry to be our sparse entry that gets written later.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first parent", &entry), IsOk());
  disk_cache::ScopedEntryPtr first_parent(entry);

  // Create a ton of entries, and keep them open, to put the cache well above
  // its eviction threshold.
  const int kTooManyEntriesCount = kMaxEntryCount * 2;
  std::list<disk_cache::ScopedEntryPtr> open_entries;
  std::string key_prefix("prefix");
  for (int i = 0; i < kTooManyEntriesCount; ++i) {
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    // Not checking the result because it will start to fail once the max size
    // is reached.
    WriteData(entry, 1, 0, buffer.get(), kWriteSize, false);
    open_entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }

  // Writing this sparse data should not crash. Ignoring the result because
  // we're only concerned with not crashing in this particular test.
  first_parent->WriteSparseData(32768, buffer.get(), 1024,
                                net::CompletionOnceCallback());
}
3508
TEST_F(DiskCacheBackendTest, MemoryCapsWritesToMaxSize) {
  // Verify that the memory backend won't grow beyond its max size if lots of
  // open entries (each smaller than the max entry size) are trying to write
  // beyond the max size.
  SetMemoryOnlyMode();

  const int kMaxSize = 100 * 1024;       // 100KB cache
  const int kNumEntries = 20;            // 20 entries to write
  const int kWriteSize = kMaxSize / 10;  // Each entry writes 1/10th the max

  SetMaxSize(kMaxSize);
  InitCache();

  auto data = base::MakeRefCounted<net::IOBufferWithSize>(kWriteSize);
  CacheTestFillBuffer(data->data(), kWriteSize, false);

  // The entry whose write is attempted last, once the cache is full.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("final", &entry), IsOk());
  disk_cache::ScopedEntryPtr final_entry(entry);

  // Fill the cache while holding every entry open; writes should begin to
  // fail once the cache reaches capacity.
  std::list<disk_cache::ScopedEntryPtr> open_entries;
  const std::string key_prefix("prefix");
  for (int i = 0; i < kNumEntries; ++i) {
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    WriteData(entry, 1, 0, data.get(), kWriteSize, false);
    open_entries.emplace_back(entry);
  }
  EXPECT_GE(kMaxSize, CalculateSizeOfAllEntries());

  // Any further write must now be rejected.
  EXPECT_THAT(WriteData(final_entry.get(), 1, 0, data.get(), kWriteSize, false),
              IsError(net::ERR_INSUFFICIENT_RESOURCES));
}
3547
// Starts a blockfile backend directly on the current thread and pumps the
// message loop so the backend's periodic usage-stats timer can fire without
// crashing.
TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  // Want to use our thread since we call SyncInit ourselves.
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(
          cache_path_, nullptr,
          base::SingleThreadTaskRunner::GetCurrentDefault(), net::DISK_CACHE,
          nullptr));
  ASSERT_TRUE(nullptr != cache.get());
  cache->SetUnitTestMode();
  ASSERT_THAT(cache->SyncInit(), IsOk());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}
3566
// Initializing on top of a cache with a mismatched on-disk version must fail,
// and a failed init must not leave the usage-stats timer alive.
TEST_F(DiskCacheBackendTest, TimerNotCreated) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));

  // Want to use our thread since we call SyncInit ourselves.
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(
          cache_path_, nullptr,
          base::SingleThreadTaskRunner::GetCurrentDefault(), net::DISK_CACHE,
          nullptr));
  ASSERT_TRUE(nullptr != cache.get());
  cache->SetUnitTestMode();
  ASSERT_NE(net::OK, cache->SyncInit());

  // After a failed SyncInit no timer should have been created.
  ASSERT_TRUE(nullptr == cache->GetTimerForTest());

  DisableIntegrityCheck();
}
3584
// Backend statistics (e.g. the "Create hit" counter) must be persisted across
// a shutdown and still be reported after the cache is reopened.
TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  // One successful create should be recorded as "Create hit" == 0x1.
  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, base::ranges::count(stats, hits));

  ResetCaches();

  // Now open the cache and verify that the stats are still there.
  DisableFirstCleanup();
  InitCache();
  EXPECT_EQ(1, cache_->GetEntryCount());

  stats.clear();
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  EXPECT_EQ(1, base::ranges::count(stats, hits));
}
3612
// Shared scenario: DoomAllEntries() must work with entries closed, with
// entries still open (dooming them in place), after references are released,
// and on an already-empty cache.
void DiskCacheBackendTest::BackendDoomAll() {
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  entry1->Close();
  entry2->Close();

  // These two stay open across the first DoomAllEntries() call.
  ASSERT_THAT(CreateEntry("third", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry2), IsOk());

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // We should stop posting tasks at some point (if we post any).
  base::RunLoop().RunUntilIdle();

  // The doomed-but-open "third" must not be re-openable, yet the key must be
  // free for a fresh create.
  disk_cache::Entry *entry3, *entry4;
  EXPECT_NE(net::OK, OpenEntry("third", &entry3));
  ASSERT_THAT(CreateEntry("third", &entry3), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry4), IsOk());

  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  entry1->Close();
  entry2->Close();
  entry3->Doom();  // The entry should be already doomed, but this must work.
  entry3->Close();
  entry4->Close();

  // Now try with all references released.
  ASSERT_THAT(CreateEntry("third", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry2), IsOk());
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(2, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // Dooming an empty cache must also succeed.
  EXPECT_THAT(DoomAllEntries(), IsOk());
}
3658
// Run the shared DoomAll scenario against each backend configuration.
TEST_F(DiskCacheBackendTest, DoomAll) {
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
  SetNewEviction();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
  SetMemoryOnlyMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomAll();
}
3682
3683 // If the index size changes when we doom the cache, we should not crash.
// Shared scenario for caches restored from a corrupt fixture: dooming all
// entries (which may shrink the index) must not crash, and the cache must
// remain usable afterwards.
void DiskCacheBackendTest::BackendDoomAll2() {
  EXPECT_EQ(2, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("Something new", &entry), IsOk());
  entry->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}
3694
// DoomAll2 over the "bad_rankings2" fixture, with both eviction algorithms.
TEST_F(DiskCacheBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDoomAll2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDoomAll2();
}
3711
3712 // We should be able to create the same entry on multiple simultaneous instances
3713 // of the cache.
TEST_F(DiskCacheTest, MultipleInstances) {
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  TestBackendResultCompletionCallback cb;

  const int kNumberOfCaches = 2;
  std::unique_ptr<disk_cache::Backend> caches[kNumberOfCaches];

  // Synchronously creates a backend of |type| rooted at |path|.
  auto create_backend = [&cb](net::CacheType type,
                              const base::FilePath& path) {
    disk_cache::BackendResult result = disk_cache::CreateCacheBackend(
        type, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr, path, 0,
        disk_cache::ResetHandling::kNeverReset, /*net_log=*/nullptr,
        cb.callback());
    return cb.GetResult(std::move(result));
  };

  disk_cache::BackendResult rv =
      create_backend(net::DISK_CACHE, store1.GetPath());
  ASSERT_THAT(rv.net_error, IsOk());
  caches[0] = std::move(rv.backend);

  rv = create_backend(net::GENERATED_BYTE_CODE_CACHE, store2.GetPath());
  ASSERT_THAT(rv.net_error, IsOk());
  caches[1] = std::move(rv.backend);

  ASSERT_TRUE(caches[0].get() != nullptr && caches[1].get() != nullptr);

  // Both live backends must accept an entry created under the same key.
  std::string key("the first key");
  for (auto& cache : caches) {
    TestEntryResultCompletionCallback cb2;
    EntryResult result = cache->CreateEntry(key, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}
3751
3752 // Test the six regions of the curve that determines the max cache size.
// Walks PreferredCacheSize() through each region of its piecewise curve,
// probing both sides of every boundary.
TEST_F(DiskCacheTest, AutomaticMaxSize) {
  using disk_cache::kDefaultCacheSize;
  int64_t large_size = kDefaultCacheSize;

  // Region 1: expected = available * 0.8
  EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
            disk_cache::PreferredCacheSize(large_size - 1));
  EXPECT_EQ(kDefaultCacheSize * 8 / 10,
            disk_cache::PreferredCacheSize(large_size));
  EXPECT_EQ(kDefaultCacheSize - 1,
            disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));

  // Region 2: expected = default_size
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 / 8));
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 - 1));

  // Region 3: expected = available * 0.1
  EXPECT_EQ(kDefaultCacheSize, disk_cache::PreferredCacheSize(large_size * 10));
  EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
            disk_cache::PreferredCacheSize(large_size * 25 - 1));

  // Region 4: expected = default_size * 2.5
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 25));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100 - 1));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250 - 1));

  // Region 5: expected = available * 0.1
  int64_t largest_size = kDefaultCacheSize * 4;
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250));
  EXPECT_EQ(largest_size - 1,
            disk_cache::PreferredCacheSize(largest_size * 100 - 1));

  // Region 6: expected = largest possible size
  EXPECT_EQ(largest_size, disk_cache::PreferredCacheSize(largest_size * 100));
  EXPECT_EQ(largest_size, disk_cache::PreferredCacheSize(largest_size * 10000));
}
3797
3798 // Make sure that we keep the total memory used by the internal buffers under
3799 // control.
// After repeated cycles of writing, truncating and reopening an entry, the
// backend's internal buffer accounting must return to zero.
TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 200;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, true);

  for (int i = 0; i < 10; i++) {
    SCOPED_TRACE(i);
    // Allocate 2MB for this entry.
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  }

  entry->Close();
  // All internal buffers must have been released by now.
  EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
}
3832
3833 // This test assumes at least 150MB of system memory.
// Exercises the backend's buffer accounting directly: IsAllocAllowed() both
// checks and records an allocation, BufferDeleted() releases it, and there is
// an overall cap on outstanding buffer memory.
// This test assumes at least 150MB of system memory.
TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
  InitCache();

  const int kOneMB = 1024 * 1024;
  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());

  cache_impl_->BufferDeleted(kOneMB);
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  // Check the upper limit.
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));

  // Fill up to the cap one MB at a time.
  for (int i = 0; i < 30; i++)
    cache_impl_->IsAllocAllowed(0, kOneMB);  // Ignore the result.

  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
}
3858
3859 // Tests that sharing of external files works and we are able to delete the
3860 // files when we need to.
// Tests that sharing of external files works and we are able to delete the
// files while they are still in use: reads/writes must keep working on the
// already-open handle, and the file must be gone once that handle closes.
TEST_F(DiskCacheBackendTest, FileSharing) {
  InitCache();

  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
  base::FilePath name = cache_impl_->GetFileName(address);

  {
    auto file = base::MakeRefCounted<disk_cache::File>(false);
    file->Init(name);

#if BUILDFLAG(IS_WIN)
    // On Windows the cache opens its files without FILE_SHARE_DELETE, so a
    // second open succeeds only when the caller also allows deletion sharing.
    DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
    DWORD access = GENERIC_READ | GENERIC_WRITE;
    base::win::ScopedHandle file2(CreateFile(name.value().c_str(), access,
                                             sharing, nullptr, OPEN_EXISTING, 0,
                                             nullptr));
    EXPECT_FALSE(file2.IsValid());

    sharing |= FILE_SHARE_DELETE;
    file2.Set(CreateFile(name.value().c_str(), access, sharing, nullptr,
                         OPEN_EXISTING, 0, nullptr));
    EXPECT_TRUE(file2.IsValid());
#endif

    // Delete the file while |file| still holds it open.
    EXPECT_TRUE(base::DeleteFile(name));

    // We should be able to use the file.
    const int kSize = 200;
    char buffer1[kSize];
    char buffer2[kSize];
    memset(buffer1, 't', kSize);
    memset(buffer2, 0, kSize);
    EXPECT_TRUE(file->Write(buffer1, kSize, 0));
    EXPECT_TRUE(file->Read(buffer2, kSize, 0));
    EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
  }

  // With the last handle closed, the file must actually be gone.
  base::File file(name, base::File::FLAG_OPEN | base::File::FLAG_READ);
  EXPECT_FALSE(file.IsValid());
  EXPECT_EQ(file.error_details(), base::File::FILE_ERROR_NOT_FOUND);
}
3903
TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  disk_cache::Entry* entry;

  // Create two entries; "key0" is created first and is therefore the oldest.
  for (int i = 0; i < 2; ++i) {
    ASSERT_THAT(CreateEntry(base::StringPrintf("key%d", i), &entry), IsOk());
    entry->Close();
  }

  // Ping the oldest entry.
  OnExternalCacheHit("key0");

  TrimForTest(false);

  // The external hit should have promoted "key0", so it survives the trim.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry("key0", &entry), IsOk());
  entry->Close();
}
3925
// Same as UpdateRankForExternalCacheHit but with the shader-cache eviction
// policy: an external hit must still promote the entry so it survives a trim.
TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }

  // Ping the oldest entry.
  OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry("key0", &entry), IsOk());
  entry->Close();
}
3948
// Shutdown-while-busy scenarios for the simple cache backend.
TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
  // Use net::APP_CACHE to make size estimations deterministic via
  // non-optimistic writes.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingCreate(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingDoom) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingDoom();
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingFileIO(false);
}
3968
// Simple-cache (and app-cache flavored) variants of the shared backend
// scenarios defined earlier in this file.
TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
  SetSimpleCacheMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheLoad) {
  SetMaxSize(0x100000);
  SetSimpleCacheMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
  SetSimpleCacheMode();
  BackendDoomRecent();
}

// crbug.com/330926, crbug.com/370677
TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
  SetSimpleCacheMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendDoomAll();
}
4025
// If one of an entry's on-disk files is missing, opening the entry must fail
// and the backend must clean up the entry's remaining files.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);
  entry->Close();
  entry = nullptr;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);
  entry->Close();
  entry = nullptr;

  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(base::DeleteFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryNormalFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}
4061
// Overwriting an entry file with a bogus magic number must make a subsequent
// open of that entry fail instead of crashing or returning bad data.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  disk_cache::Entry* null = nullptr;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ASSERT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // The entry is being closed on the Simple Cache worker pool
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Write an invalid header for stream 0 and stream 1.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  disk_cache::SimpleFileHeader header;
  header.initial_magic_number = UINT64_C(0xbadf00d);
  EXPECT_TRUE(base::WriteFile(entry_file1_path,
                              base::as_bytes(base::make_span(&header, 1u))));
  ASSERT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
}
4096
4097 // Tests that the Simple Cache Backend fails to initialize with non-matching
4098 // file structure on disk.
// Tests that the Simple Cache Backend fails to initialize with non-matching
// file structure on disk.
TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
  // Create a cache structure with the |BackendImpl|.
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  ResetCaches();

  // Check that the |SimpleBackendImpl| does not favor this structure.
  auto simple_cache = std::make_unique<disk_cache::SimpleBackendImpl>(
      /*file_operations_factory=*/nullptr, cache_path_, nullptr, nullptr, 0,
      net::DISK_CACHE, nullptr);
  net::TestCompletionCallback cb;
  simple_cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.WaitForResult());
  simple_cache.reset();
  DisableIntegrityCheck();
}
4121
4122 // Tests that the |BackendImpl| refuses to initialize on top of the files
4123 // generated by the Simple Cache Backend.
// Tests that the |BackendImpl| refuses to initialize on top of the files
// generated by the Simple Cache Backend.
TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
  // Create a cache structure with the |SimpleBackendImpl|.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  ResetCaches();

  // Check that the |BackendImpl| does not favor this structure.
  auto cache = std::make_unique<disk_cache::BackendImpl>(
      cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
  cache->SetUnitTestMode();
  net::TestCompletionCallback cb;
  cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.WaitForResult());
  cache.reset();
  DisableIntegrityCheck();
}
4147
// Runs the shared FixEnumerators scenario against the simple cache backend.
TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
  SetSimpleCacheMode();
  BackendFixEnumerators();
}
4152
4153 // Tests basic functionality of the SimpleBackend implementation of the
4154 // enumeration API.
// Tests basic functionality of the SimpleBackend implementation of the
// enumeration API: every entry is returned exactly once, even while some
// entries are held open during the enumeration.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries.
  std::set<std::string> keys_to_match(key_pool);
  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());

  // Check that opening entries does not affect enumeration.
  keys_to_match = key_pool;
  iter = CreateIterator();
  count = 0;
  disk_cache::Entry* entry_opened_before;
  ASSERT_THAT(OpenEntry(*(key_pool.begin()), &entry_opened_before), IsOk());
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size() / 2, iter.get(),
                                    &keys_to_match, &count));

  // Open another entry partway through the enumeration as well.
  disk_cache::Entry* entry_opened_middle;
  ASSERT_EQ(net::OK, OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();
  entry_opened_before->Close();
  entry_opened_middle->Close();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}
4189
4190 // Tests that the enumerations are not affected by dooming an entry in the
4191 // middle.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Enumerate roughly half the entries, then doom one of the keys that has
  // not been returned yet.
  std::set<std::string> remaining(key_pool);
  std::unique_ptr<TestIterator> enumerator = CreateIterator();
  size_t seen = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size() / 2, enumerator.get(),
                                    &remaining, &seen));

  const std::string doomed_key = *remaining.begin();
  DoomEntry(doomed_key);
  remaining.erase(doomed_key);
  key_pool.erase(doomed_key);

  // Finishing the enumeration must yield every entry except the doomed one.
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, enumerator.get(), &remaining, &seen));
  enumerator.reset();

  EXPECT_EQ(key_pool.size(), seen);
  EXPECT_TRUE(remaining.empty());
}
4215
4216 // Tests that enumerations are not affected by corrupt files.
// Tests that enumerations are not affected by corrupt files: the corrupted
// entry is skipped while all other entries are still returned.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
  SetSimpleCacheMode();
  InitCache();
  // Create a corrupt entry.
  const std::string key = "the key";
  disk_cache::Entry* corrupted_entry;

  ASSERT_THAT(CreateEntry(key, &corrupted_entry), IsOk());
  ASSERT_TRUE(corrupted_entry);
  const int kSize = 50;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_EQ(kSize,
            WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
  ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
  corrupted_entry->Close();
  // Let all I/O finish so it doesn't race with corrupting the file below.
  RunUntilIdle();

  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Corrupt the first entry's file on disk; the index still counts it.
  EXPECT_TRUE(
      disk_cache::simple_util::CreateCorruptFileForTests(key, cache_path_));
  EXPECT_EQ(key_pool.size() + 1, static_cast<size_t>(cache_->GetEntryCount()));

  // Check that enumeration returns all entries but the corrupt one.
  std::set<std::string> keys_to_match(key_pool);
  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}
4253
4254 // Tests that enumerations don't leak memory when the backend is destructed
4255 // mid-enumeration.
// Tests that enumerations don't leak memory when the backend is destructed
// mid-enumeration (the leak would be reported by the test harness).
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Start an enumeration and keep its first entry open.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  EXPECT_TRUE(entry);
  disk_cache::ScopedEntryPtr entry_closer(entry);

  // Destroy the backend while |iter| and |entry| are still alive.
  ResetCaches();
  // This test passes if we don't leak memory.
}
4271
4272 // Verify that tasks run in priority order when the experiment is enabled.
4273 // Test has races, disabling until fixed: https://crbug.com/853283
TEST_F(DiskCacheBackendTest, DISABLED_SimpleCachePrioritizedEntryOrder) {
  base::test::ScopedFeatureList scoped_feature_list;
  SetSimpleCacheMode();
  InitCache();

  // Set the SimpleCache's worker pool to a sequenced type for testing
  // priority order.
  disk_cache::SimpleBackendImpl* simple_cache =
      static_cast<disk_cache::SimpleBackendImpl*>(cache_.get());
  auto task_runner = base::ThreadPool::CreateSequencedTaskRunner(
      {base::TaskPriority::USER_VISIBLE, base::MayBlock()});
  simple_cache->SetTaskRunnerForTesting(task_runner);

  // Create three entries. Priority order is 3, 1, 2 because 3 has the highest
  // request priority and 1 is created before 2.
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(CreateEntryWithPriority("first", net::LOWEST, &entry1), IsOk());
  ASSERT_THAT(CreateEntryWithPriority("second", net::LOWEST, &entry2), IsOk());
  ASSERT_THAT(CreateEntryWithPriority("third", net::HIGHEST, &entry3), IsOk());

  // Write some data to the entries.
  const int kSize = 10;
  auto buf1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buf3 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);
  CacheTestFillBuffer(buf2->data(), kSize, false);
  CacheTestFillBuffer(buf3->data(), kSize, false);

  // Write to stream 2 because it's the only stream that can't be read from
  // synchronously.
  EXPECT_EQ(kSize, WriteData(entry1, 2, 0, buf1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(entry2, 2, 0, buf1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(entry3, 2, 0, buf1.get(), kSize, true));

  // Wait until the task_runner's queue is empty (WriteData might have
  // optimistically returned synchronously but still had some tasks to run in
  // the worker pool).
  base::RunLoop run_loop;
  task_runner->PostTaskAndReply(FROM_HERE, base::DoNothing(),
                                run_loop.QuitClosure());
  run_loop.Run();

  // Records the order in which the reads below complete; only the read for
  // entry 2 carries a quit closure (it is expected to finish last).
  std::vector<int> finished_read_order;
  auto finished_callback = [](std::vector<int>* finished_read_order,
                              int entry_number, base::OnceClosure quit_closure,
                              int rv) {
    finished_read_order->push_back(entry_number);
    if (quit_closure)
      std::move(quit_closure).Run();
  };

  auto read_buf1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto read_buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto read_buf3 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);

  // Read from the entries in order 2, 3, 1. They should be reprioritized to
  // 3, 1, 2.
  base::RunLoop read_run_loop;

  entry2->ReadData(2, 0, read_buf2.get(), kSize,
                   base::BindOnce(finished_callback, &finished_read_order, 2,
                                  read_run_loop.QuitClosure()));
  entry3->ReadData(2, 0, read_buf3.get(), kSize,
                   base::BindOnce(finished_callback, &finished_read_order, 3,
                                  base::OnceClosure()));
  entry1->ReadData(2, 0, read_buf1.get(), kSize,
                   base::BindOnce(finished_callback, &finished_read_order, 1,
                                  base::OnceClosure()));
  EXPECT_EQ(0u, finished_read_order.size());

  read_run_loop.Run();
  EXPECT_EQ((std::vector<int>{3, 1, 2}), finished_read_order);
  entry1->Close();
  entry2->Close();
  entry3->Close();
}
4353
4354 // Tests that enumerations include entries with long keys.
TEST_F(DiskCacheBackendTest,SimpleCacheEnumerationLongKeys)4355 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationLongKeys) {
4356 SetSimpleCacheMode();
4357 InitCache();
4358 std::set<std::string> key_pool;
4359 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
4360
4361 const size_t long_key_length =
4362 disk_cache::SimpleSynchronousEntry::kInitialHeaderRead + 10;
4363 std::string long_key(long_key_length, 'X');
4364 key_pool.insert(long_key);
4365 disk_cache::Entry* entry = nullptr;
4366 ASSERT_THAT(CreateEntry(long_key.c_str(), &entry), IsOk());
4367 entry->Close();
4368
4369 std::unique_ptr<TestIterator> iter = CreateIterator();
4370 size_t count = 0;
4371 EXPECT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &key_pool, &count));
4372 EXPECT_TRUE(key_pool.empty());
4373 }
4374
4375 // Tests that a SimpleCache doesn't crash when files are deleted very quickly
4376 // after closing.
4377 // NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
TEST_F(DiskCacheBackendTest,SimpleCacheDeleteQuickly)4378 TEST_F(DiskCacheBackendTest, SimpleCacheDeleteQuickly) {
4379 SetSimpleCacheMode();
4380 for (int i = 0; i < 100; ++i) {
4381 InitCache();
4382 ResetCaches();
4383 EXPECT_TRUE(CleanupCacheDir());
4384 }
4385 }
4386
// Verifies that dooming an entry after the index has been serialized does
// not invalidate the on-disk index: a later restart should load the index
// from disk (INITIALIZE_METHOD_LOADED) rather than rebuild it.
TEST_F(DiskCacheBackendTest, SimpleCacheLateDoom) {
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  entry1->Close();

  // Ensure that the directory mtime is flushed to disk before serializing the
  // index.
  disk_cache::FlushCacheThreadForTesting();
#if BUILDFLAG(IS_POSIX)
  base::File cache_dir(cache_path_,
                       base::File::FLAG_OPEN | base::File::FLAG_READ);
  EXPECT_TRUE(cache_dir.Flush());
#endif  // BUILDFLAG(IS_POSIX)
  // Destroying the backend writes out the index; flush so it's on disk.
  ResetCaches();
  disk_cache::FlushCacheThreadForTesting();

  // The index is now written. Dooming the last entry can't delete a file,
  // because that would advance the cache directory mtime and invalidate the
  // index.
  entry2->Doom();
  entry2->Close();

  DisableFirstCleanup();
  InitCache();
  // The saved index should still be considered fresh and used as-is.
  EXPECT_EQ(disk_cache::SimpleIndex::INITIALIZE_METHOD_LOADED,
            simple_cache_impl_->index()->init_method());
}
4418
// Verifies that a negative max size ("pick a default") yields a sane
// computed limit, and that the cache-size field trial scales it as expected.
TEST_F(DiskCacheBackendTest, SimpleCacheNegMaxSize) {
  SetMaxSize(-1);
  SetSimpleCacheMode();
  InitCache();
  // We don't know what it will pick, but it's limited to what
  // disk_cache::PreferredCacheSize would return, scaled by the size experiment,
  // which only goes as much as 4x. It definitely should not be MAX_UINT64.
  EXPECT_NE(simple_cache_impl_->index()->max_size(),
            std::numeric_limits<uint64_t>::max());

  int max_default_size =
      2 * disk_cache::PreferredCacheSize(std::numeric_limits<int32_t>::max());

  ASSERT_GE(max_default_size, 0);
  EXPECT_LT(simple_cache_impl_->index()->max_size(),
            static_cast<unsigned>(max_default_size));

  // Baseline limit, used to bound the scaled limit below.
  uint64_t max_size_without_scaling = simple_cache_impl_->index()->max_size();

  // Scale to 200%. The size should be twice of |max_size_without_scaling| but
  // since that's capped on 20% of available size, checking for the size to be
  // between max_size_without_scaling and max_size_without_scaling*2.
  {
    base::test::ScopedFeatureList scoped_feature_list;
    std::map<std::string, std::string> field_trial_params;
    field_trial_params["percent_relative_size"] = "200";
    scoped_feature_list.InitAndEnableFeatureWithParameters(
        disk_cache::kChangeDiskCacheSizeExperiment, field_trial_params);

    // Re-create the cache while the experiment is active.
    InitCache();

    uint64_t max_size_scaled = simple_cache_impl_->index()->max_size();

    EXPECT_GE(max_size_scaled, max_size_without_scaling);
    EXPECT_LE(max_size_scaled, 2 * max_size_without_scaling);
  }
}
4456
TEST_F(DiskCacheBackendTest, SimpleLastModified) {
  // Simple cache used to incorrectly set LastModified on entries based on
  // timestamp of the cache directory, and not the entries' file
  // (https://crbug.com/714143). So this test arranges for a situation
  // where this would occur by doing:
  // 1) Write entry 1
  // 2) Delay
  // 3) Write entry 2. This sets directory time stamp to be different from
  //    timestamp of entry 1 (due to the delay)
  // It then checks whether the entry 1 got the proper timestamp or not.

  SetSimpleCacheMode();
  InitCache();
  std::string key1 = GenerateKey(true);
  std::string key2 = GenerateKey(true);

  disk_cache::Entry* entry1;
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());

  // Make the Create complete --- SimpleCache can handle it optimistically,
  // and if we let it go fully async then trying to flush the Close might just
  // flush the Create.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  entry1->Close();

  // Make the ::Close actually complete, since it is asynchronous.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Upper bound for entry1's modification time.
  Time entry1_timestamp = Time::NowFromSystemTime();

  // Don't want AddDelay since it sleep 1s(!) for SimpleCache, and we don't
  // care about reduced precision in index here.
  while (base::Time::NowFromSystemTime() <=
         (entry1_timestamp + base::Milliseconds(10))) {
    base::PlatformThread::Sleep(base::Milliseconds(1));
  }

  // Creating entry2 after the delay advances the directory mtime past
  // |entry1_timestamp|.
  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  entry2->Close();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  disk_cache::Entry* reopen_entry1;
  ASSERT_THAT(OpenEntry(key1, &reopen_entry1), IsOk());

  // This shouldn't pick up entry2's write time incorrectly.
  EXPECT_LE(reopen_entry1->GetLastModified(), entry1_timestamp);
  reopen_entry1->Close();
}
4510
// Exercises the file descriptor limiter: creates more entries than the
// fixture's FD cap (64) and checks, via the FileDescriptorLimiterAction
// histogram, that files get closed and transparently reopened as entries
// are touched. The expected bucket counts are cumulative, so statement
// order below is load-bearing.
TEST_F(DiskCacheBackendTest, SimpleFdLimit) {
  base::HistogramTester histogram_tester;
  SetSimpleCacheMode();
  // Make things blocking so CreateEntry actually waits for file to be
  // created.
  SetCacheType(net::APP_CACHE);
  InitCache();

  disk_cache::Entry* entries[kLargeNumEntries];
  std::string keys[kLargeNumEntries];
  for (int i = 0; i < kLargeNumEntries; ++i) {
    keys[i] = GenerateKey(true);
    ASSERT_THAT(CreateEntry(keys[i], &entries[i]), IsOk());
  }

  // Note the fixture sets the file limit to 64.
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_CLOSE_FILE,
                                     kLargeNumEntries - 64);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE, 0);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  const int kSize = 25000;
  auto buf1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  auto buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf2->data(), kSize, false);

  // Doom an entry and create a new one with same name, to test that both
  // re-open properly.
  EXPECT_EQ(net::OK, DoomEntry(keys[0]));
  disk_cache::Entry* alt_entry;
  ASSERT_THAT(CreateEntry(keys[0], &alt_entry), IsOk());

  // One more file closure here to accommodate for alt_entry.
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_CLOSE_FILE,
                                     kLargeNumEntries - 64 + 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE, 0);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  // Do some writes in [1...kLargeNumEntries) range, both testing bring those in
  // and kicking out [0] and [alt_entry]. These have to be to stream != 0 to
  // actually need files.
  for (int i = 1; i < kLargeNumEntries; ++i) {
    EXPECT_EQ(kSize, WriteData(entries[i], 1, 0, buf1.get(), kSize, true));
    auto read_buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
    ASSERT_EQ(kSize, ReadData(entries[i], 1, 0, read_buf.get(), kSize));
    EXPECT_EQ(0, memcmp(read_buf->data(), buf1->data(), kSize));
  }

  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE,
                                     kLargeNumEntries - 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);
  // Touch the two entries that were kicked out above; they must reopen.
  EXPECT_EQ(kSize, WriteData(entries[0], 1, 0, buf1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(alt_entry, 1, 0, buf2.get(), kSize, true));

  auto read_buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  ASSERT_EQ(kSize, ReadData(entries[0], 1, 0, read_buf.get(), kSize));
  EXPECT_EQ(0, memcmp(read_buf->data(), buf1->data(), kSize));

  auto read_buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  ASSERT_EQ(kSize, ReadData(alt_entry, 1, 0, read_buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(read_buf2->data(), buf2->data(), kSize));

  // Two more things than last time --- entries[0] and |alt_entry|
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE,
                                     kLargeNumEntries + 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  for (auto* entry : entries) {
    entry->Close();
    RunUntilIdle();
  }
  alt_entry->Close();
  RunUntilIdle();

  // Closes have to pull things in to write out the footer, but they also
  // free up FDs.
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_REOPEN_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);
}
4618
// Regression test: closing sparse entries in a particular order, while the
// cache is over its size limit, should not trigger buggy reentrant eviction.
TEST_F(DiskCacheBackendTest, SparseEvict) {
  // Tiny limit so the writes below push the cache over its threshold.
  const int kMaxSize = 512;

  SetMaxSize(kMaxSize);
  InitCache();

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(64);
  CacheTestFillBuffer(buffer->data(), 64, false);

  disk_cache::Entry* entry0 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.0.com/", &entry0), IsOk());

  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.1.com/", &entry1), IsOk());

  disk_cache::Entry* entry2 = nullptr;
  // This strange looking domain name affects cache trim order
  // due to hashing
  ASSERT_THAT(CreateEntry("http://www.15360.com/", &entry2), IsOk());

  // Write sparse data to put us over the eviction threshold
  ASSERT_EQ(64, WriteSparseData(entry0, 0, buffer.get(), 64));
  // Large offset: lands in a different sparse range than the write above.
  ASSERT_EQ(1, WriteSparseData(entry0, 67108923, buffer.get(), 1));
  ASSERT_EQ(1, WriteSparseData(entry1, 53, buffer.get(), 1));
  ASSERT_EQ(1, WriteSparseData(entry2, 0, buffer.get(), 1));

  // Closing these in a special order should not lead to buggy reentrant
  // eviction.
  entry1->Close();
  entry2->Close();
  entry0->Close();
}
4651
// Dooming all entries in a memory-only cache must correctly walk a sparse
// parent entry together with its children, even after a failed sparse write.
TEST_F(DiskCacheBackendTest, InMemorySparseDoom) {
  const int kMaxSize = 512;
  const int kDataSize = 64;

  SetMaxSize(kMaxSize);
  SetMemoryOnlyMode();
  InitCache();

  auto payload = base::MakeRefCounted<net::IOBufferWithSize>(kDataSize);
  CacheTestFillBuffer(payload->data(), kDataSize, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("http://www.0.com/", &entry), IsOk());

  // This sparse write fails (the offset/size doesn't fit the tiny cache),
  // but sparse bookkeeping may still have been set up for the entry.
  ASSERT_EQ(net::ERR_FAILED,
            WriteSparseData(entry, 4337, payload.get(), kDataSize));
  entry->Close();

  // Dooming all entries at this point should properly iterate over
  // the parent and its children.
  DoomAllEntries();
}
4672
// The blockfile backend caps the configurable max size at INT32_MAX:
// setting exactly the cap succeeds, one byte beyond is rejected.
TEST_F(DiskCacheBackendTest, BlockFileMaxSizeLimit) {
  InitCache();

  constexpr int64_t kCap = std::numeric_limits<int32_t>::max();
  SetMaxSize(kCap, /*should_succeed=*/true);
  SetMaxSize(kCap + 1, /*should_succeed=*/false);
}
4682
// The in-memory backend also caps the configurable max size at INT32_MAX.
TEST_F(DiskCacheBackendTest, InMemoryMaxSizeLimit) {
  SetMemoryOnlyMode();
  InitCache();

  constexpr int64_t kCap = std::numeric_limits<int32_t>::max();
  SetMaxSize(kCap, /*should_succeed=*/true);
  SetMaxSize(kCap + 1, /*should_succeed=*/false);
}
4693
// Unlike blockfile and in-memory, the simple backend accepts max sizes
// larger than INT32_MAX.
TEST_F(DiskCacheBackendTest, SimpleMaxSizeLimit) {
  SetSimpleCacheMode();
  InitCache();

  constexpr int64_t kInt32Cap = std::numeric_limits<int32_t>::max();
  SetMaxSize(kInt32Cap, /*should_succeed=*/true);
  SetMaxSize(kInt32Cap + 1, /*should_succeed=*/true);
}
4704
// Shared body for the *OpenOrCreateEntry tests. Covers: create on miss,
// open on hit (same Entry pointer), key isolation, create after doom, and
// cancellation of a pending callback when the backend is destroyed.
void DiskCacheBackendTest::BackendOpenOrCreateEntry() {
  // Avoid the weird kNoRandom flag on blockfile, since this needs to
  // test cleanup behavior actually used in production.
  if (memory_only_) {
    InitCache();
  } else {
    CleanupCacheDir();
    // Since we're not forcing a clean shutdown, integrity check may fail.
    DisableIntegrityCheck();
    CreateBackend(disk_cache::kNone);
  }

  // Test that new key is created.
  disk_cache::EntryResult es1 = OpenOrCreateEntry("first");
  ASSERT_THAT(es1.net_error(), IsOk());
  ASSERT_FALSE(es1.opened());
  disk_cache::Entry* e1 = es1.ReleaseEntry();
  ASSERT_TRUE(nullptr != e1);

  // Test that existing key is opened and its entry matches.
  disk_cache::EntryResult es2 = OpenOrCreateEntry("first");
  ASSERT_THAT(es2.net_error(), IsOk());
  ASSERT_TRUE(es2.opened());
  disk_cache::Entry* e2 = es2.ReleaseEntry();
  ASSERT_TRUE(nullptr != e2);
  ASSERT_EQ(e1, e2);

  // Test that different keys' entries are not the same.
  disk_cache::EntryResult es3 = OpenOrCreateEntry("second");
  ASSERT_THAT(es3.net_error(), IsOk());
  ASSERT_FALSE(es3.opened());
  disk_cache::Entry* e3 = es3.ReleaseEntry();
  ASSERT_TRUE(nullptr != e3);
  ASSERT_NE(e3, e1);

  // Test that a new entry can be created with the same key as a doomed entry.
  e3->Doom();
  disk_cache::EntryResult es4 = OpenOrCreateEntry("second");
  ASSERT_THAT(es4.net_error(), IsOk());
  ASSERT_FALSE(es4.opened());
  disk_cache::Entry* e4 = es4.ReleaseEntry();
  ASSERT_TRUE(nullptr != e4);
  ASSERT_NE(e4, e3);

  // Verify the expected number of entries ("first" plus the live "second";
  // the doomed one no longer counts).
  ASSERT_EQ(2, cache_->GetEntryCount());

  e1->Close();
  e2->Close();
  e3->Close();
  e4->Close();

  // Test proper cancellation of callback. In-memory cache
  // is always synchronous, so this isn't' meaningful for it.
  if (!memory_only_) {
    TestEntryResultCompletionCallback callback;

    // Using "first" here:
    // 1) It's an existing entry, so SimpleCache can't cheat with an optimistic
    // create.
    // 2) "second"'s creation is a cheated post-doom create one, which also
    // makes testing trickier.
    EntryResult result =
        cache_->OpenOrCreateEntry("first", net::HIGHEST, callback.callback());
    ASSERT_EQ(net::ERR_IO_PENDING, result.net_error());
    ResetCaches();

    // Callback is supposed to be cancelled, so have to flush everything
    // to check for any trouble.
    disk_cache::FlushCacheThreadForTesting();
    RunUntilIdle();
    EXPECT_FALSE(callback.have_result());
  }
}
4779
// OpenOrCreateEntry coverage for the in-memory backend.
TEST_F(DiskCacheBackendTest, InMemoryOnlyOpenOrCreateEntry) {
  SetMemoryOnlyMode();
  BackendOpenOrCreateEntry();
}
4784
// OpenOrCreateEntry coverage for the default (blockfile) backend.
TEST_F(DiskCacheBackendTest, MAYBE_BlockFileOpenOrCreateEntry) {
  BackendOpenOrCreateEntry();
}
4788
// OpenOrCreateEntry coverage for the simple backend.
TEST_F(DiskCacheBackendTest, MAYBE_SimpleOpenOrCreateEntry) {
  SetSimpleCacheMode();
  BackendOpenOrCreateEntry();
}
4793
BackendDeadOpenNextEntry()4794 void DiskCacheBackendTest::BackendDeadOpenNextEntry() {
4795 InitCache();
4796 std::unique_ptr<disk_cache::Backend::Iterator> iter =
4797 cache_->CreateIterator();
4798 ResetCaches();
4799 EntryResult result = iter->OpenNextEntry(base::DoNothing());
4800 ASSERT_EQ(net::ERR_FAILED, result.net_error());
4801 }
4802
// Dead-iterator coverage for the blockfile backend.
TEST_F(DiskCacheBackendTest, BlockFileBackendDeadOpenNextEntry) {
  BackendDeadOpenNextEntry();
}
4806
// Dead-iterator coverage for the simple backend.
TEST_F(DiskCacheBackendTest, SimpleBackendDeadOpenNextEntry) {
  SetSimpleCacheMode();
  BackendDeadOpenNextEntry();
}
4811
// Dead-iterator coverage for the in-memory backend.
TEST_F(DiskCacheBackendTest, InMemorySimpleBackendDeadOpenNextEntry) {
  SetMemoryOnlyMode();
  BackendDeadOpenNextEntry();
}
4816
// Shared body: a live iterator must survive DoomAllEntries() running
// mid-enumeration. After the doom, the next OpenNextEntry may legitimately
// either succeed or fail; only a crash/hang would be a bug.
void DiskCacheBackendTest::BackendIteratorConcurrentDoom() {
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;
  EXPECT_EQ(net::OK, CreateEntry("Key0", &entry1));
  EXPECT_EQ(net::OK, CreateEntry("Key1", &entry2));

  std::unique_ptr<disk_cache::Backend::Iterator> iter =
      cache_->CreateIterator();

  disk_cache::Entry* entry3 = nullptr;
  EXPECT_EQ(net::OK, OpenEntry("Key0", &entry3));

  // Advance the iterator once so it holds internal state.
  TestEntryResultCompletionCallback cb;
  EntryResult result_iter = iter->OpenNextEntry(cb.callback());
  result_iter = cb.GetResult(std::move(result_iter));
  EXPECT_EQ(net::OK, result_iter.net_error());

  // Doom everything while the iterator is live.
  net::TestCompletionCallback cb_doom;
  int rv_doom = cache_->DoomAllEntries(cb_doom.callback());
  EXPECT_EQ(net::OK, cb_doom.GetResult(rv_doom));

  TestEntryResultCompletionCallback cb2;
  EntryResult result_iter2 = iter->OpenNextEntry(cb2.callback());
  result_iter2 = cb2.GetResult(std::move(result_iter2));

  // Either outcome is acceptable after the concurrent doom.
  EXPECT_TRUE(result_iter2.net_error() == net::ERR_FAILED ||
              result_iter2.net_error() == net::OK);

  entry1->Close();
  entry2->Close();
  entry3->Close();
}
4849
// Iterator-vs-doom coverage for blockfile with new eviction.
TEST_F(DiskCacheBackendTest, BlockFileIteratorConcurrentDoom) {
  // Init in normal mode, bug not reproducible with kNoRandom. Still need to
  // let the test fixture know the new eviction algorithm will be on.
  CleanupCacheDir();
  SetNewEviction();
  CreateBackend(disk_cache::kNone);
  BackendIteratorConcurrentDoom();
}
4858
// Iterator-vs-doom coverage for the simple backend.
TEST_F(DiskCacheBackendTest, SimpleIteratorConcurrentDoom) {
  SetSimpleCacheMode();
  InitCache();
  BackendIteratorConcurrentDoom();
}
4864
// Iterator-vs-doom coverage for the in-memory backend.
TEST_F(DiskCacheBackendTest, InMemoryConcurrentDoom) {
  SetMemoryOnlyMode();
  InitCache();
  BackendIteratorConcurrentDoom();
}
4870
// A corrupt index in an otherwise empty simple cache should not prevent
// backend creation: with no entry files present, recovery is safe.
TEST_F(DiskCacheBackendTest, EmptyCorruptSimpleCacheRecovery) {
  SetSimpleCacheMode();

  // Plant a corrupt fake index in the (empty) cache directory.
  ASSERT_TRUE(base::PathExists(cache_path_));
  const base::FilePath index_path = cache_path_.AppendASCII("index");
  ASSERT_TRUE(base::WriteFile(index_path, "corrupted"));

  // Simple cache should be able to recover.
  TestBackendResultCompletionCallback cb;
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
}
4891
// A corrupt index in a simple cache that already contains entry files must
// NOT be silently recovered: backend creation should fail.
TEST_F(DiskCacheBackendTest, MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover) {
  SetSimpleCacheMode();
  // Populate the cache with some entries first.
  BackendOpenOrCreateEntry();

  // Overwrite the fake index file with garbage.
  ASSERT_TRUE(base::PathExists(cache_path_));
  const base::FilePath index_path = cache_path_.AppendASCII("index");
  ASSERT_TRUE(base::WriteFile(index_path, "corrupted"));

  // Simple cache should not be able to recover when there are entry files.
  TestBackendResultCompletionCallback cb;
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsError(net::ERR_FAILED));
}
4913
// Checks callback/context cleanup when the backend dies between an entry
// becoming ready and its completion callback running.
TEST_F(DiskCacheBackendTest, SimpleOwnershipTransferBackendDestroyRace) {
  // Sentinel whose destructor records that the bound callback state was
  // destroyed (i.e. the cancelled callback did not leak).
  struct CleanupContext {
    explicit CleanupContext(bool* ran_ptr) : ran_ptr(ran_ptr) {}
    ~CleanupContext() {
      *ran_ptr = true;
    }

    raw_ptr<bool> ran_ptr;
  };

  const char kKey[] = "skeleton";

  // This test was for a fix for see https://crbug.com/946349, but the mechanics
  // of that failure became impossible after a follow up API refactor. Still,
  // the timing is strange, and warrant coverage; in particular this tests what
  // happen if the SimpleBackendImpl is destroyed after SimpleEntryImpl
  // decides to return an entry to the caller, but before the callback is run.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  // Make sure create actually succeeds, not just optimistically.
  RunUntilIdle();

  bool cleanup_context_ran = false;
  auto cleanup_context = std::make_unique<CleanupContext>(&cleanup_context_ran);

  // The OpenEntry code below will find a pre-existing entry in a READY state,
  // so it will immediately post a task to return a result. Destroying the
  // backend before running the event loop again will run that callback in the
  // dead-backend state, while OpenEntry completion was still with it alive.

  EntryResult result = cache_->OpenEntry(
      kKey, net::HIGHEST,
      base::BindOnce(
          [](std::unique_ptr<CleanupContext>, EntryResult result) {
            // The callback is here for ownership of CleanupContext,
            // and it shouldn't get invoked in this test. Normal
            // one would transfer result.entry to CleanupContext.
            ADD_FAILURE() << "This should not actually run";

            // ... but if it ran, it also shouldn't see the pointer.
            EXPECT_EQ(nullptr, result.ReleaseEntry());
          },
          std::move(cleanup_context)));
  EXPECT_EQ(net::ERR_IO_PENDING, result.net_error());
  ResetCaches();

  // Give CleanupContext a chance to do its thing.
  RunUntilIdle();
  EXPECT_TRUE(cleanup_context_ran);

  entry->Close();
}
4969
// Verify that reloading the cache will preserve indices in kNeverReset mode.
TEST_F(DiskCacheBackendTest, SimpleCacheSoftResetKeepsValues) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  { // Do the initial cache creation then delete the values.
    TestBackendResultCompletionCallback cb;

    // Create an initial back-end and wait for indexing
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // Create an entry in the cache
    CreateKeyAndCheck(cache.get(), "key");
  }
  // |cache| is destroyed at the end of the scope above, shutting the
  // backend down.

  RunUntilIdle();

  { // Do the second cache creation with no reset flag, preserving entries.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // The entry should be present, as a forced reset was not called for.
    EXPECT_TRUE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                    ->index()
                    ->Has(disk_cache::simple_util::GetEntryHashKey("key")));
  }
}
5016
// Verify that reloading the cache will not preserve indices in Reset mode.
TEST_F(DiskCacheBackendTest, SimpleCacheHardResetDropsValues) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  { // Create the initial back-end.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // Create an entry in the cache.
    CreateKeyAndCheck(cache.get(), "key");
  }
  // |cache| is destroyed at the end of the scope above, shutting the
  // backend down.

  RunUntilIdle();

  { // Re-load cache with a reset flag, which should ignore existing entries.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kReset, /*net_log=*/nullptr,
        cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // The entry shouldn't be present, as a forced reset was called for.
    EXPECT_FALSE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                     ->index()
                     ->Has(disk_cache::simple_util::GetEntryHashKey("key")));

    // Add the entry back in the cache, then make sure it's present.
    CreateKeyAndCheck(cache.get(), "key");

    EXPECT_TRUE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                    ->index()
                    ->Has(disk_cache::simple_util::GetEntryHashKey("key")));
  }
}
5069
// Test to make sure cancelation of backend operation that got queued after
// a pending doom on backend destruction happens properly.
TEST_F(DiskCacheBackendTest, SimpleCancelOpPendingDoom) {
  // Sentinel whose destructor records that the bound callback state was
  // destroyed (i.e. the cancelled callback did not leak).
  struct CleanupContext {
    explicit CleanupContext(bool* ran_ptr) : ran_ptr(ran_ptr) {}
    ~CleanupContext() { *ran_ptr = true; }

    raw_ptr<bool> ran_ptr;
  };

  const char kKey[] = "skeleton";

  // Disable optimistic ops.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  entry->Close();

  // Queue doom.
  cache_->DoomEntry(kKey, net::LOWEST, base::DoNothing());

  // Queue create after it.
  bool cleanup_context_ran = false;
  auto cleanup_context = std::make_unique<CleanupContext>(&cleanup_context_ran);

  EntryResult entry_result = cache_->CreateEntry(
      kKey, net::HIGHEST,
      base::BindOnce(
          [](std::unique_ptr<CleanupContext>, EntryResult result) {
            // Should be cancelled (not run) when the backend dies below.
            ADD_FAILURE() << "This should not actually run";
          },
          std::move(cleanup_context)));

  EXPECT_EQ(net::ERR_IO_PENDING, entry_result.net_error());
  // Destroy the backend with both the doom and the create still queued.
  ResetCaches();

  RunUntilIdle();
  EXPECT_TRUE(cleanup_context_ran);
}
5112
TEST_F(DiskCacheBackendTest, SimpleDontLeakPostDoomCreate) {
  // If an entry has been optimistically created after a pending doom, and the
  // backend destroyed before the doom completed, the entry would get wedged,
  // with no operations on it workable and entry leaked.
  // (See https://crbug.com/1015774).
  const char kKey[] = "for_lock";
  const int kBufSize = 2 * 1024;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  entry->Close();

  // Make sure create actually succeeds, not just optimistically.
  RunUntilIdle();

  // Queue doom.
  int rv = cache_->DoomEntry(kKey, net::LOWEST, base::DoNothing());
  ASSERT_EQ(net::ERR_IO_PENDING, rv);

  // And then do a create. This actually succeeds optimistically.
  EntryResult result =
      cache_->CreateEntry(kKey, net::LOWEST, base::DoNothing());
  ASSERT_EQ(net::OK, result.net_error());
  entry = result.ReleaseEntry();

  // Destroy the backend while the doom is still pending.
  ResetCaches();

  // Entry is still supposed to be operable. This part is needed to see the bug
  // without a leak checker.
  EXPECT_EQ(kBufSize, WriteData(entry, 1, 0, buffer.get(), kBufSize, false));

  entry->Close();

  // Should not have leaked files here.
}
5153
TEST_F(DiskCacheBackendTest, BlockFileDelayedWriteFailureRecovery) {
  // Regression test for https://crbug.com/1086727: blockfile must recover
  // gracefully when entries end up in a broken state because a delayed
  // writeback failed.
  InitCache();

  const char kKey[] = "Key2";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  const int kPayloadSize = 24320;
  auto payload = base::MakeRefCounted<net::IOBufferWithSize>(kPayloadSize);
  CacheTestFillBuffer(payload->data(), kPayloadSize, true);

  ASSERT_EQ(kPayloadSize,
            WriteSparseData(entry, 0, payload.get(), kPayloadSize));

  // Artificially shrinking the size limit injects a failure when the data
  // buffered above is written back.
  SetMaxSize(4096);

  // This write makes SparseControl close the child entry covering the low
  // part of the offset space, which triggers the writeback. The writeback
  // fails due to the size cap -- in particular it cannot allocate space for a
  // stream, which therefore ends up with address 0.
  ASSERT_EQ(net::ERR_FAILED,
            WriteSparseData(entry, 16773118, payload.get(), 4));

  // Reading the broken child back must surface an error, not DCHECK.
  ASSERT_EQ(net::ERR_FAILED, ReadSparseData(entry, 4, payload.get(), 4));

  entry->Close();
}
5187
TEST_F(DiskCacheBackendTest, BlockFileInsertAliasing) {
  // Test for not having rankings corruption due to aliasing between iterator
  // and other ranking list copies during insertion operations.
  //
  // https://crbug.com/1156288

  // Need to disable weird extra sync behavior to hit the bug.
  CreateBackend(disk_cache::kNone);
  SetNewEviction();  // default, but integrity check doesn't realize that.

  const char kKey[] = "Key0";
  const char kKeyA[] = "KeyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA41";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  const int kBufSize = 61188;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  // Two async sparse writes at far-apart offsets; these produce the child
  // entries that the iterator below will walk over.
  net::TestCompletionCallback cb_write64;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(8, buffer.get(), 64, cb_write64.callback()));

  net::TestCompletionCallback cb_write61k;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(16773118, buffer.get(), 61188,
                                   cb_write61k.callback()));

  EXPECT_EQ(64, cb_write64.WaitForResult());
  EXPECT_EQ(61188, cb_write61k.WaitForResult());

  EXPECT_EQ(4128, WriteSparseData(entry, 2147479550, buffer.get(), 4128));

  // Create an iterator, then keep doing insert/update operations so that the
  // iterator's copy of the rankings can alias the live ranking lists.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(4128, WriteSparseData(entry, 2147479550, buffer.get(), 4128));
  EXPECT_EQ(64, WriteSparseData(entry, 8, buffer.get(), 64));

  disk_cache::Entry* itEntry1 = nullptr;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&itEntry1));
  // These are actually child nodes for range.

  entry->Close();

  disk_cache::Entry* itEntry2 = nullptr;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&itEntry2));

  // Doom everything while the iteration is still in flight...
  net::TestCompletionCallback doom_cb;
  EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomAllEntries(doom_cb.callback()));

  // ...and immediately re-create the same key, racing with the doom.
  TestEntryResultCompletionCallback cb_create1;
  disk_cache::EntryResult result =
      cache_->CreateEntry(kKey, net::HIGHEST, cb_create1.callback());
  EXPECT_EQ(net::OK, doom_cb.WaitForResult());
  result = cb_create1.WaitForResult();
  EXPECT_EQ(net::OK, result.net_error());
  entry = result.ReleaseEntry();

  disk_cache::Entry* entryA = nullptr;
  ASSERT_THAT(CreateEntry(kKeyA, &entryA), IsOk());
  entryA->Close();

  // Advance the iterator past the fresh insertions. Note this is EXPECT_EQ,
  // not ASSERT_EQ: itEntry3 may legitimately remain null (checked below).
  disk_cache::Entry* itEntry3 = nullptr;
  EXPECT_EQ(net::OK, iter->OpenNextEntry(&itEntry3));

  EXPECT_EQ(net::OK, DoomEntry(kKeyA));
  itEntry1->Close();
  entry->Close();
  itEntry2->Close();
  if (itEntry3)
    itEntry3->Close();
}
5259
TEST_F(DiskCacheBackendTest, MemCacheBackwardsClock) {
  // The in-memory cache must tolerate the wall clock moving backwards.
  base::SimpleTestClock clock;
  clock.SetNow(base::Time::Now());

  SetMemoryOnlyMode();
  InitCache();
  mem_cache_->SetClockForTesting(&clock);

  const int kPayloadSize = 4 * 1024;
  auto payload = base::MakeRefCounted<net::IOBufferWithSize>(kPayloadSize);
  CacheTestFillBuffer(payload->data(), kPayloadSize, true);

  // One entry written "now"...
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key1", &entry), IsOk());
  EXPECT_EQ(kPayloadSize,
            WriteData(entry, 0, 0, payload.get(), kPayloadSize, false));
  entry->Close();

  // ...and one written an hour in the "past".
  clock.Advance(-base::Hours(1));

  ASSERT_THAT(CreateEntry("key2", &entry), IsOk());
  EXPECT_EQ(kPayloadSize,
            WriteData(entry, 0, 0, payload.get(), kPayloadSize, false));
  entry->Close();

  // Both entries must be visible to, and removable by, the full-range
  // time-based operations despite the clock reversal.
  EXPECT_LE(2 * kPayloadSize,
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(net::OK, DoomEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(0, CalculateSizeOfAllEntries());

  mem_cache_->SetClockForTesting(nullptr);
}
5293
TEST_F(DiskCacheBackendTest, SimpleOpenOrCreateIndexError) {
  // Exercise behavior of OpenOrCreateEntry in SimpleCache where the index
  // incorrectly claims the entry is missing. Regression test for
  // https://crbug.com/1316034
  const char kKey[] = "http://example.org";

  const int kDataSize = 256;
  auto payload = base::MakeRefCounted<net::IOBufferWithSize>(kDataSize);
  CacheTestFillBuffer(payload->data(), kDataSize, /*no_nulls=*/false);

  SetSimpleCacheMode();
  InitCache();

  // Write an entry to disk.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  EXPECT_EQ(kDataSize,
            WriteData(entry, /*index=*/1, /*offset=*/0, payload.get(),
                      /*len=*/kDataSize, /*truncate=*/false));
  entry->Close();

  // Drop the key from the index so the entry wrongly looks absent.
  simple_cache_impl_->index()->Remove(
      disk_cache::simple_util::GetEntryHashKey(kKey));

  // OpenOrCreateEntry should nonetheless locate the on-disk entry and report
  // it as opened, with its data intact.
  disk_cache::EntryResult open_result = OpenOrCreateEntry(kKey);
  ASSERT_THAT(open_result.net_error(), IsOk());
  ASSERT_TRUE(open_result.opened());
  entry = open_result.ReleaseEntry();
  EXPECT_EQ(kDataSize, entry->GetDataSize(/*index=*/1));
  entry->Close();
}
5327
TEST_F(DiskCacheBackendTest, SimpleOpenOrCreateIndexErrorOptimistic) {
  // Exercise behavior of OpenOrCreateEntry in SimpleCache where the index
  // incorrectly claims the entry is missing and we do an optimistic create.
  // Covers a codepath adjacent to the one that caused https://crbug.com/1316034
  const char kKey[] = "http://example.org";

  SetSimpleCacheMode();
  InitCache();

  const int kDataSize = 256;
  auto payload = base::MakeRefCounted<net::IOBufferWithSize>(kDataSize);
  CacheTestFillBuffer(payload->data(), kDataSize, /*no_nulls=*/false);

  // Write an entry to disk.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  EXPECT_EQ(kDataSize,
            WriteData(entry, /*index=*/1, /*offset=*/0, payload.get(),
                      /*len=*/kDataSize, /*truncate=*/false));
  entry->Close();

  // Drain all pending I/O so OpenOrCreateEntry can take the optimistic path.
  RunUntilIdle();

  // Drop the key from the index so the entry wrongly looks absent.
  simple_cache_impl_->index()->Remove(
      disk_cache::simple_util::GetEntryHashKey(kKey));

  // OpenOrCreateEntry still works, but because the backend trusted the index
  // and created optimistically, the result is a fresh, empty entry.
  disk_cache::EntryResult open_result = OpenOrCreateEntry(kKey);
  ASSERT_THAT(open_result.net_error(), IsOk());
  ASSERT_FALSE(open_result.opened());
  entry = open_result.ReleaseEntry();
  EXPECT_EQ(0, entry->GetDataSize(/*index=*/1));
  entry->Close();
}
5365
TEST_F(DiskCacheBackendTest, SimpleDoomAfterBackendDestruction) {
  // Test for when validating file headers/footers during close on simple
  // backend fails. To get the header to be checked on close, there needs to
  // be a stream 2, since 0/1 are validated on open, and no other operation
  // must have happened to stream 2, since those will force it, too. A way of
  // getting the validation to fail is to perform a doom on the file after the
  // backend is destroyed, since that truncates the files to mark them
  // invalid. See https://crbug.com/1317884
  const char kKey[] = "Key0";

  const int kDataSize = 256;
  auto payload = base::MakeRefCounted<net::IOBufferWithSize>(kDataSize);
  CacheTestFillBuffer(payload->data(), kDataSize, /*no_nulls=*/false);

  SetCacheType(net::SHADER_CACHE);
  SetSimpleCacheMode();

  InitCache();
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  // Touch stream 2 with a zero-length write, without any other operation on
  // that stream.
  EXPECT_EQ(0, WriteData(entry, /*index=*/2, /*offset=*/1, payload.get(),
                         /*len=*/0, /*truncate=*/false));
  entry->Close();

  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());
  ResetCaches();

  // Doom after backend destruction; the subsequent Close() performs the
  // validation this test targets.
  entry->Doom();
  entry->Close();
}
5397
BackendValidateMigrated()5398 void DiskCacheBackendTest::BackendValidateMigrated() {
5399 // Blockfile 3.0 migration test.
5400 DisableFirstCleanup(); // started from copied dir, not cleaned dir.
5401 InitCache();
5402
5403 // The total size comes straight from the headers, and is expected to be 1258
5404 // for either set of testdata.
5405 EXPECT_EQ(1258, CalculateSizeOfAllEntries());
5406 EXPECT_EQ(1, cache_->GetEntryCount());
5407
5408 disk_cache::Entry* entry = nullptr;
5409 ASSERT_THAT(OpenEntry("https://example.org/data", &entry), IsOk());
5410
5411 // Size of the actual payload.
5412 EXPECT_EQ(1234, entry->GetDataSize(1));
5413
5414 entry->Close();
5415 }
5416
// Migration check: blockfile cache written by version 2.0, default eviction.
TEST_F(DiskCacheBackendTest, BlockfileMigrate20) {
  ASSERT_TRUE(CopyTestCache("good_2_0"));
  BackendValidateMigrated();
}
5421
// Migration check: blockfile cache written by version 2.1, default eviction.
TEST_F(DiskCacheBackendTest, BlockfileMigrate21) {
  ASSERT_TRUE(CopyTestCache("good_2_1"));
  BackendValidateMigrated();
}
5426
// Migration check: blockfile cache written by version 2.0, new eviction.
TEST_F(DiskCacheBackendTest, BlockfileMigrateNewEviction20) {
  ASSERT_TRUE(CopyTestCache("good_2_0"));
  SetNewEviction();
  BackendValidateMigrated();
}
5432
// Migration check: blockfile cache written by version 2.1, new eviction.
TEST_F(DiskCacheBackendTest, BlockfileMigrateNewEviction21) {
  ASSERT_TRUE(CopyTestCache("good_2_1"));
  SetNewEviction();
  BackendValidateMigrated();
}
5438
5439 // Disabled on android since this test requires cache creator to create
5440 // blockfile caches, and we don't use them on Android anyway.
5441 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheBackendTest, BlockfileEmptyIndex) {
  // Regression case for https://crbug.com/1441330 --- blockfile DCHECKing
  // on mmap error for files it uses.

  // Build a fresh blockfile cache on disk.
  TestBackendResultCompletionCallback callback;
  disk_cache::BackendResult result = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kNeverReset, nullptr, callback.callback());
  result = callback.GetResult(std::move(result));
  ASSERT_THAT(result.net_error, IsOk());
  ASSERT_TRUE(result.backend);
  result.backend.reset();

  // Wait until all of its I/O has settled.
  disk_cache::BackendImpl::FlushForTesting();

  // Corrupt the cache by truncating its index file to zero bytes.
  base::File index_file(cache_path_.AppendASCII("index"),
                        base::File::FLAG_OPEN | base::File::FLAG_WRITE);
  ASSERT_TRUE(index_file.IsValid());
  ASSERT_TRUE(index_file.SetLength(0));
  index_file.Close();

  // Without error recovery, reopening the backend must fail cleanly.
  result = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kNeverReset, nullptr, callback.callback());
  result = callback.GetResult(std::move(result));
  EXPECT_EQ(result.net_error, net::ERR_FAILED);
  EXPECT_FALSE(result.backend);

  // With the "delete and start over on error" policy callers normally use,
  // reopening must succeed.
  result = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kResetOnError, nullptr, callback.callback());
  result = callback.GetResult(std::move(result));
  ASSERT_THAT(result.net_error, IsOk());
  ASSERT_TRUE(result.backend);
}
5486 #endif
5487
5488 // See https://crbug.com/1486958
TEST_F(DiskCacheBackendTest, SimpleDoomIter) {
  constexpr int kNumEntries = 1000;

  SetSimpleCacheMode();
  // Note: this test relies on InitCache() making sure the index is ready.
  InitCache();

  // Populate enough entries that DoomAllEntries() is still in progress when
  // the iteration callback fires, reproducing the timing of the bug.
  for (int i = 0; i < kNumEntries; ++i) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(CreateEntry(base::NumberToString(i), &entry), IsOk());
    entry->Close();
  }
  RunUntilIdle();  // Let all the Close() calls settle.

  auto iter = cache_->CreateIterator();
  base::RunLoop loop;

  disk_cache::EntryResult open_result = iter->OpenNextEntry(
      base::BindLambdaForTesting([&](disk_cache::EntryResult result) {
        ASSERT_EQ(result.net_error(), net::OK);
        disk_cache::Entry* entry = result.ReleaseEntry();
        entry->Doom();
        entry->Close();
        loop.Quit();
      }));
  ASSERT_EQ(open_result.net_error(), net::ERR_IO_PENDING);
  // Race a mass doom against the pending iteration.
  cache_->DoomAllEntries(base::DoNothing());
  loop.Run();
}
5520
5521 // See https://crbug.com/1486958
TEST_F(DiskCacheBackendTest, SimpleOpenIter) {
  constexpr int kEntries = 50;

  SetSimpleCacheMode();
  // Note: this test relies on InitCache() making sure the index is ready.
  InitCache();

  // We create a whole bunch of entries so that deleting them will hopefully
  // finish after the iteration, in order to reproduce timing for the bug.
  for (int i = 0; i < kEntries; ++i) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(CreateEntry(base::NumberToString(i), &entry), IsOk());
    entry->Close();
  }
  RunUntilIdle();  // Make sure close completes.
  EXPECT_EQ(kEntries, cache_->GetEntryCount());

  // Iterate once to get the order.
  base::queue<std::string> keys;
  auto iterator = cache_->CreateIterator();
  base::RunLoop run_loop;
  // Self-re-arming callback: records each entry's key, then requests the next
  // entry. It refers to itself through the captured `collect_entry_key`
  // variable, which is assigned before the first async completion can run.
  base::RepeatingCallback<void(EntryResult)> collect_entry_key =
      base::BindLambdaForTesting([&](disk_cache::EntryResult result) {
        if (result.net_error() == net::ERR_FAILED) {
          run_loop.Quit();
          return;  // iteration complete.
        }
        ASSERT_EQ(result.net_error(), net::OK);
        disk_cache::Entry* entry = result.ReleaseEntry();
        keys.push(entry->GetKey());
        entry->Close();
        result = iterator->OpenNextEntry(collect_entry_key);
        EXPECT_EQ(result.net_error(), net::ERR_IO_PENDING);
      });

  disk_cache::EntryResult result = iterator->OpenNextEntry(collect_entry_key);
  ASSERT_EQ(result.net_error(), net::ERR_IO_PENDING);
  run_loop.Run();

  // Open all entries with iterator...
  int opened = 0;       // successful by-name OpenEntry() completions.
  int iter_opened = 0;  // callback invocations for the second iteration.
  bool iter_done = false;
  auto all_done = [&]() { return opened == kEntries && iter_done; };

  iterator = cache_->CreateIterator();
  base::RunLoop run_loop2;
  // Second self-re-arming iteration callback; just walks the entries.
  base::RepeatingCallback<void(EntryResult)> handle_entry =
      base::BindLambdaForTesting([&](disk_cache::EntryResult result) {
        ++iter_opened;
        if (result.net_error() == net::ERR_FAILED) {
          // The terminating invocation was counted too, hence the -1.
          EXPECT_EQ(iter_opened - 1, kEntries);
          iter_done = true;
          if (all_done()) {
            run_loop2.Quit();
          }
          return;  // iteration complete.
        }
        EXPECT_EQ(result.net_error(), net::OK);
        result = iterator->OpenNextEntry(handle_entry);
        EXPECT_EQ(result.net_error(), net::ERR_IO_PENDING);
      });

  result = iterator->OpenNextEntry(handle_entry);
  ASSERT_EQ(result.net_error(), net::ERR_IO_PENDING);

  // ... while simultaneously opening them via name.
  auto handle_open_result =
      base::BindLambdaForTesting([&](disk_cache::EntryResult result) {
        if (result.net_error() == net::OK) {
          ++opened;
        }
        if (all_done()) {
          run_loop2.Quit();
        }
      });

  // Opens one key per posted task and re-posts itself until `keys` drains, so
  // the by-name opens interleave with the iteration above.
  base::RepeatingClosure open_one_entry = base::BindLambdaForTesting([&]() {
    std::string key = keys.front();
    keys.pop();
    disk_cache::EntryResult result =
        cache_->OpenEntry(key, net::DEFAULT_PRIORITY, handle_open_result);
    if (result.net_error() != net::ERR_IO_PENDING) {
      handle_open_result.Run(std::move(result));
    }

    if (!keys.empty()) {
      base::SequencedTaskRunner::GetCurrentDefault()->PostTask(FROM_HERE,
                                                               open_one_entry);
    }
  });
  base::SequencedTaskRunner::GetCurrentDefault()->PostTask(FROM_HERE,
                                                           open_one_entry);

  run_loop2.Run();

  // Should not have eaten any entries.
  EXPECT_EQ(kEntries, cache_->GetEntryCount());
}
5621
5622 // Make sure that if we close an entry in callback from open/create we do not
5623 // trigger dangling pointer warnings.
TEST_F(DiskCacheBackendTest, BlockFileImmediateCloseNoDangle) {
  InitCache();
  base::RunLoop loop;
  EntryResult create_result =
      cache_->CreateEntry("some key", net::HIGHEST,
                          base::BindLambdaForTesting([&](EntryResult result) {
                            ASSERT_EQ(result.net_error(), net::OK);
                            result.ReleaseEntry()->Close();
                            // Force the close to actually happen now.
                            disk_cache::BackendImpl::FlushForTesting();
                            loop.Quit();
                          }));
  EXPECT_EQ(create_result.net_error(), net::ERR_IO_PENDING);
  loop.Run();
}
5639