• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <stdint.h>
6 
7 #include <memory>
8 
9 #include "base/files/file.h"
10 #include "base/files/file_util.h"
11 #include "base/functional/bind.h"
12 #include "base/functional/callback.h"
13 #include "base/functional/callback_helpers.h"
14 #include "base/memory/memory_pressure_listener.h"
15 #include "base/memory/raw_ptr.h"
16 #include "base/metrics/field_trial.h"
17 #include "base/ranges/algorithm.h"
18 #include "base/run_loop.h"
19 #include "base/strings/string_number_conversions.h"
20 #include "base/strings/string_split.h"
21 #include "base/strings/string_util.h"
22 #include "base/strings/stringprintf.h"
23 #include "base/task/sequenced_task_runner.h"
24 #include "base/task/single_thread_task_runner.h"
25 #include "base/task/thread_pool.h"
26 #include "base/test/metrics/histogram_tester.h"
27 #include "base/test/scoped_feature_list.h"
28 #include "base/test/simple_test_clock.h"
29 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
30 #include "base/threading/platform_thread.h"
31 #include "base/threading/thread_restrictions.h"
32 #include "base/time/time.h"
33 #include "base/trace_event/memory_allocator_dump.h"
34 #include "base/trace_event/process_memory_dump.h"
35 #include "build/build_config.h"
36 #include "net/base/cache_type.h"
37 #include "net/base/completion_once_callback.h"
38 #include "net/base/io_buffer.h"
39 #include "net/base/net_errors.h"
40 #include "net/base/request_priority.h"
41 #include "net/base/test_completion_callback.h"
42 #include "net/base/tracing.h"
43 #include "net/disk_cache/backend_cleanup_tracker.h"
44 #include "net/disk_cache/blockfile/backend_impl.h"
45 #include "net/disk_cache/blockfile/entry_impl.h"
46 #include "net/disk_cache/blockfile/experiments.h"
47 #include "net/disk_cache/blockfile/histogram_macros.h"
48 #include "net/disk_cache/blockfile/mapped_file.h"
49 #include "net/disk_cache/cache_util.h"
50 #include "net/disk_cache/disk_cache_test_base.h"
51 #include "net/disk_cache/disk_cache_test_util.h"
52 #include "net/disk_cache/memory/mem_backend_impl.h"
53 #include "net/disk_cache/simple/simple_backend_impl.h"
54 #include "net/disk_cache/simple/simple_entry_format.h"
55 #include "net/disk_cache/simple/simple_histogram_enums.h"
56 #include "net/disk_cache/simple/simple_index.h"
57 #include "net/disk_cache/simple/simple_synchronous_entry.h"
58 #include "net/disk_cache/simple/simple_test_util.h"
59 #include "net/disk_cache/simple/simple_util.h"
60 #include "net/test/gtest_util.h"
61 #include "testing/gmock/include/gmock/gmock.h"
62 #include "testing/gtest/include/gtest/gtest.h"
63 #include "third_party/abseil-cpp/absl/types/optional.h"
64 
65 using disk_cache::EntryResult;
66 using net::test::IsError;
67 using net::test::IsOk;
68 using testing::ByRef;
69 using testing::Contains;
70 using testing::Eq;
71 using testing::Field;
72 
73 #if BUILDFLAG(IS_WIN)
74 #include "base/win/scoped_handle.h"
75 
76 #include <windows.h>
77 #endif
78 
79 // Provide a BackendImpl object to macros from histogram_macros.h.
80 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
81 
82 // TODO(crbug.com/949811): Fix memory leaks in tests and re-enable on LSAN.
83 #ifdef LEAK_SANITIZER
84 #define MAYBE_BlockFileOpenOrCreateEntry DISABLED_BlockFileOpenOrCreateEntry
85 #define MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover \
86   DISABLED_NonEmptyCorruptSimpleCacheDoesNotRecover
87 #define MAYBE_SimpleOpenOrCreateEntry DISABLED_SimpleOpenOrCreateEntry
88 #else
89 #define MAYBE_BlockFileOpenOrCreateEntry BlockFileOpenOrCreateEntry
90 #define MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover \
91   NonEmptyCorruptSimpleCacheDoesNotRecover
92 #define MAYBE_SimpleOpenOrCreateEntry SimpleOpenOrCreateEntry
93 #endif
94 
95 using base::Time;
96 
97 namespace {
98 
99 const char kExistingEntryKey[] = "existing entry key";
100 
CreateExistingEntryCache(const base::FilePath & cache_path)101 std::unique_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
102     const base::FilePath& cache_path) {
103   net::TestCompletionCallback cb;
104 
105   std::unique_ptr<disk_cache::BackendImpl> cache(
106       std::make_unique<disk_cache::BackendImpl>(cache_path,
107                                                 /* cleanup_tracker = */ nullptr,
108                                                 /* cache_thread = */ nullptr,
109                                                 net::DISK_CACHE,
110                                                 /* net_log = */ nullptr));
111   cache->Init(cb.callback());
112   if (cb.WaitForResult() != net::OK)
113     return nullptr;
114 
115   TestEntryResultCompletionCallback cb2;
116   EntryResult result =
117       cache->CreateEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
118   result = cb2.GetResult(std::move(result));
119   if (result.net_error() != net::OK)
120     return nullptr;
121 
122   return cache;
123 }
124 
125 #if BUILDFLAG(IS_FUCHSIA)
126 // Load tests with large numbers of file descriptors perform poorly on
127 // virtualized test execution environments.
128 // TODO(807882): Remove this workaround when virtualized test performance
129 // improves.
130 const int kLargeNumEntries = 100;
131 #else
132 const int kLargeNumEntries = 512;
133 #endif
134 
135 }  // namespace
136 
137 // Tests that can run with different types of caches.
138 class DiskCacheBackendTest : public DiskCacheTestWithCache {
139  protected:
140   // Some utility methods:
141 
142   // Perform IO operations on the cache until there is pending IO.
143   int GeneratePendingIO(net::TestCompletionCallback* cb);
144 
145   // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
146   // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
147   // There are 4 entries after doomed_start and 2 after doomed_end.
148   void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
149 
150   bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
151   bool EnumerateAndMatchKeys(int max_to_open,
152                              TestIterator* iter,
153                              std::set<std::string>* keys_to_match,
154                              size_t* count);
155 
156   // Computes the expected size of entry metadata, i.e. the total size without
157   // the actual data stored. This depends only on the entry's |key| size.
158   int GetEntryMetadataSize(std::string key);
159 
160   // The Simple Backend only tracks the approximate sizes of entries. This
161   // rounds the exact size appropriately.
162   int GetRoundedSize(int exact_size);
163 
164   // Create a default key with the name provided, populate it with
165   // CacheTestFillBuffer, and ensure this was done correctly.
166   void CreateKeyAndCheck(disk_cache::Backend* cache, std::string key);
167 
168   // For the simple cache, wait until indexing has occurred and make sure
169   // completes successfully.
170   void WaitForSimpleCacheIndexAndCheck(disk_cache::Backend* cache);
171 
172   // Run all of the task runners untile idle, covers cache worker pools.
173   void RunUntilIdle();
174 
175   // Actual tests:
176   void BackendBasics();
177   void BackendKeying();
178   void BackendShutdownWithPendingFileIO(bool fast);
179   void BackendShutdownWithPendingIO(bool fast);
180   void BackendShutdownWithPendingCreate(bool fast);
181   void BackendShutdownWithPendingDoom();
182   void BackendSetSize();
183   void BackendLoad();
184   void BackendChain();
185   void BackendValidEntry();
186   void BackendInvalidEntry();
187   void BackendInvalidEntryRead();
188   void BackendInvalidEntryWithLoad();
189   void BackendTrimInvalidEntry();
190   void BackendTrimInvalidEntry2();
191   void BackendEnumerations();
192   void BackendEnumerations2();
193   void BackendDoomMidEnumeration();
194   void BackendInvalidEntryEnumeration();
195   void BackendFixEnumerators();
196   void BackendDoomRecent();
197   void BackendDoomBetween();
198   void BackendCalculateSizeOfAllEntries();
199   void BackendCalculateSizeOfEntriesBetween(
200       bool expect_access_time_range_comparisons);
201   void BackendTransaction(const std::string& name, int num_entries, bool load);
202   void BackendRecoverInsert();
203   void BackendRecoverRemove();
204   void BackendRecoverWithEviction();
205   void BackendInvalidEntry2();
206   void BackendInvalidEntry3();
207   void BackendInvalidEntry7();
208   void BackendInvalidEntry8();
209   void BackendInvalidEntry9(bool eviction);
210   void BackendInvalidEntry10(bool eviction);
211   void BackendInvalidEntry11(bool eviction);
212   void BackendTrimInvalidEntry12();
213   void BackendDoomAll();
214   void BackendDoomAll2();
215   void BackendInvalidRankings();
216   void BackendInvalidRankings2();
217   void BackendDisable();
218   void BackendDisable2();
219   void BackendDisable3();
220   void BackendDisable4();
221   void BackendDisabledAPI();
222   void BackendEviction();
223   void BackendOpenOrCreateEntry();
224   void BackendDeadOpenNextEntry();
225   void BackendIteratorConcurrentDoom();
226   void BackendValidateMigrated();
227 };
228 
CreateKeyAndCheck(disk_cache::Backend * cache,std::string key)229 void DiskCacheBackendTest::CreateKeyAndCheck(disk_cache::Backend* cache,
230                                              std::string key) {
231   const int kBufSize = 4 * 1024;
232   scoped_refptr<net::IOBuffer> buffer =
233       base::MakeRefCounted<net::IOBuffer>(kBufSize);
234   CacheTestFillBuffer(buffer->data(), kBufSize, true);
235   TestEntryResultCompletionCallback cb_entry;
236   disk_cache::EntryResult result =
237       cache->CreateEntry(key, net::HIGHEST, cb_entry.callback());
238   result = cb_entry.GetResult(std::move(result));
239   ASSERT_EQ(net::OK, result.net_error());
240   disk_cache::Entry* entry = result.ReleaseEntry();
241   EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
242   entry->Close();
243   RunUntilIdle();
244 }
245 
WaitForSimpleCacheIndexAndCheck(disk_cache::Backend * cache)246 void DiskCacheBackendTest::WaitForSimpleCacheIndexAndCheck(
247     disk_cache::Backend* cache) {
248   net::TestCompletionCallback wait_for_index_cb;
249   static_cast<disk_cache::SimpleBackendImpl*>(cache)->index()->ExecuteWhenReady(
250       wait_for_index_cb.callback());
251   int rv = wait_for_index_cb.WaitForResult();
252   ASSERT_THAT(rv, IsOk());
253   RunUntilIdle();
254 }
255 
// Drains all task runners until idle: the fixture's pools, the current
// thread's run loop, and the dedicated cache thread, so that any work the
// cache posted anywhere has completed before the test proceeds.
void DiskCacheBackendTest::RunUntilIdle() {
  DiskCacheTestWithCache::RunUntilIdle();
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
}
261 
// Issues writes against a freshly-created entry until one returns
// ERR_IO_PENDING (or a 10 MB budget is exhausted), and returns that last
// write's result. |cb| will be invoked when the pending operation finishes.
int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
  // Only meaningful when the cache runs on the current thread (or in simple
  // cache mode); otherwise "pending" would just mean a thread hop rather
  // than real OS-level pending IO.
  if (!use_current_thread_ && !simple_cache_mode_) {
    ADD_FAILURE();
    return net::ERR_FAILED;
  }

  TestEntryResultCompletionCallback create_cb;
  EntryResult entry_result;
  entry_result =
      cache_->CreateEntry("some key", net::HIGHEST, create_cb.callback());
  entry_result = create_cb.GetResult(std::move(entry_result));
  if (entry_result.net_error() != net::OK)
    return net::ERR_CACHE_CREATE_FAILURE;
  disk_cache::Entry* entry = entry_result.ReleaseEntry();

  const int kSize = 25000;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  int rv = net::OK;
  // Write 25000-byte chunks at 64 KB strides, stopping as soon as a write
  // goes asynchronous.
  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
    // We are using the current thread as the cache thread because we want to
    // be able to call directly this method to make sure that the OS (instead
    // of us switching thread) is returning IO pending.
    if (!simple_cache_mode_) {
      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
          0, i, buffer.get(), kSize, cb->callback(), false);
    } else {
      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
    }

    if (rv == net::ERR_IO_PENDING)
      break;
    if (rv != kSize)
      rv = net::ERR_FAILED;
  }

  // Don't call Close() to avoid going through the queue or we'll deadlock
  // waiting for the operation to finish.
  if (!simple_cache_mode_)
    static_cast<disk_cache::EntryImpl*>(entry)->Release();
  else
    entry->Close();

  return rv;
}
309 
// Initializes the cache and adds 5 sparse entries, each written in two
// far-apart chunks. Timestamps are sampled (with AddDelay() to guarantee
// distinct clock values) so that |doomed_start|..|doomed_end| brackets the
// "first" and "second" entries for doom-by-time tests.
void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
                                           base::Time* doomed_end) {
  InitCache();

  const int kSize = 50;
  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
  const int kOffset = 10 + 1024 * 1024;

  disk_cache::Entry* entry0 = nullptr;
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  // "zeroth" is written before |doomed_start| and should survive dooming.
  ASSERT_THAT(CreateEntry("zeroth", &entry0), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
  entry0->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_start)
    *doomed_start = base::Time::Now();

  // Order in rankings list:
  // first_part1, first_part2, second_part1, second_part2
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
  entry1->Close();

  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
  entry2->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_end)
    *doomed_end = base::Time::Now();

  // Order in rankings list:
  // third_part1, fourth_part1, third_part2, fourth_part2
  // (writes to "third" and "fourth" are deliberately interleaved).
  disk_cache::Entry* entry3 = nullptr;
  disk_cache::Entry* entry4 = nullptr;
  ASSERT_THAT(CreateEntry("third", &entry3), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
  ASSERT_THAT(CreateEntry("fourth", &entry4), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
  entry3->Close();
  entry4->Close();

  FlushQueueForTest();
  AddDelay();
}
374 
375 // Creates entries based on random keys. Stores these keys in |key_pool|.
CreateSetOfRandomEntries(std::set<std::string> * key_pool)376 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
377     std::set<std::string>* key_pool) {
378   const int kNumEntries = 10;
379   const int initial_entry_count = cache_->GetEntryCount();
380 
381   for (int i = 0; i < kNumEntries; ++i) {
382     std::string key = GenerateKey(true);
383     disk_cache::Entry* entry;
384     if (CreateEntry(key, &entry) != net::OK) {
385       return false;
386     }
387     key_pool->insert(key);
388     entry->Close();
389   }
390   return key_pool->size() ==
391          static_cast<size_t>(cache_->GetEntryCount() - initial_entry_count);
392 }
393 
394 // Performs iteration over the backend and checks that the keys of entries
395 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
396 // will be opened, if it is positive. Otherwise, iteration will continue until
397 // OpenNextEntry stops returning net::OK.
EnumerateAndMatchKeys(int max_to_open,TestIterator * iter,std::set<std::string> * keys_to_match,size_t * count)398 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
399     int max_to_open,
400     TestIterator* iter,
401     std::set<std::string>* keys_to_match,
402     size_t* count) {
403   disk_cache::Entry* entry;
404 
405   if (!iter)
406     return false;
407   while (iter->OpenNextEntry(&entry) == net::OK) {
408     if (!entry)
409       return false;
410     EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
411     entry->Close();
412     ++(*count);
413     if (max_to_open >= 0 && static_cast<int>(*count) >= max_to_open)
414       break;
415   };
416 
417   return true;
418 }
419 
GetEntryMetadataSize(std::string key)420 int DiskCacheBackendTest::GetEntryMetadataSize(std::string key) {
421   // For blockfile and memory backends, it is just the key size.
422   if (!simple_cache_mode_)
423     return key.size();
424 
425   // For the simple cache, we must add the file header and EOF, and that for
426   // every stream.
427   return disk_cache::kSimpleEntryStreamCount *
428          (sizeof(disk_cache::SimpleFileHeader) +
429           sizeof(disk_cache::SimpleFileEOF) + key.size());
430 }
431 
GetRoundedSize(int exact_size)432 int DiskCacheBackendTest::GetRoundedSize(int exact_size) {
433   if (!simple_cache_mode_)
434     return exact_size;
435 
436   return (exact_size + 255) & 0xFFFFFF00;
437 }
438 
// Core create/open/doom coverage shared by every backend flavor.
void DiskCacheBackendTest::BackendBasics() {
  InitCache();
  disk_cache::Entry *entry1 = nullptr, *entry2 = nullptr;
  // A key that was never created cannot be opened.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_THAT(CreateEntry("the first key", &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  // The entry remains openable after Close().
  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  // Creating an existing key fails; opening it succeeds.
  EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  ASSERT_THAT(CreateEntry("some other key", &entry2), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  ASSERT_TRUE(nullptr != entry2);
  EXPECT_EQ(2, cache_->GetEntryCount());

  // Re-opening an already-open entry yields the very same object.
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(OpenEntry("some other key", &entry3), IsOk());
  ASSERT_TRUE(nullptr != entry3);
  EXPECT_TRUE(entry2 == entry3);

  // Dooming removes the entry from the count even while it is still open.
  EXPECT_THAT(DoomEntry("some other key"), IsOk());
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry1->Close();
  entry2->Close();
  entry3->Close();

  EXPECT_THAT(DoomEntry("the first key"), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());

  // Dooming via the entry itself and via the backend both work.
  ASSERT_THAT(CreateEntry("the first key", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("some other key", &entry2), IsOk());
  entry1->Doom();
  entry1->Close();
  EXPECT_THAT(DoomEntry("some other key"), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
  entry2->Close();
}
483 
// Basic operations on the default (blockfile) backend.
TEST_F(DiskCacheBackendTest, Basics) {
  BackendBasics();
}
487 
// Basic operations with the blockfile backend's newer eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
  SetNewEviction();
  BackendBasics();
}
492 
// Basic operations on the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
  SetMemoryOnlyMode();
  BackendBasics();
}
497 
// Basic operations in APP_CACHE mode.
TEST_F(DiskCacheBackendTest, AppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  BackendBasics();
}
502 
// Basic operations in SHADER_CACHE mode.
TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
  SetCacheType(net::SHADER_CACHE);
  BackendBasics();
}
507 
// Verifies key handling: case sensitivity, keys read from differently
// aligned buffers, long keys (block-file vs. external storage), and keys
// containing embedded null bytes.
void DiskCacheBackendTest::BackendKeying() {
  InitCache();
  const char kName1[] = "the first key";
  const char kName2[] = "the first Key";
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(kName1, &entry1), IsOk());

  // Keys differing only in case are distinct entries.
  ASSERT_THAT(CreateEntry(kName2, &entry2), IsOk());
  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
  entry2->Close();

  // The same key passed from differently-offset buffers must resolve to the
  // same entry (i.e. the key is compared by value, not by pointer).
  char buffer[30];
  base::strlcpy(buffer, kName1, std::size(buffer));
  ASSERT_THAT(OpenEntry(buffer, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 1, kName1, std::size(buffer) - 1);
  ASSERT_THAT(OpenEntry(buffer + 1, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 3, kName1, std::size(buffer) - 3);
  ASSERT_THAT(OpenEntry(buffer + 3, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  // Now verify long keys.
  char buffer2[20000];
  memset(buffer2, 's', sizeof(buffer2));
  buffer2[1023] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
  entry2->Close();

  buffer2[1023] = 'g';
  buffer2[19999] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
  entry2->Close();
  entry1->Close();

  // Create entries with null terminator(s), and check equality. Note we create
  // the strings via the ctor instead of using literals because literals are
  // implicitly C strings which will stop at the first null terminator.
  std::string key1(4, '\0');
  key1[1] = 's';
  std::string key2(3, '\0');
  key2[1] = 's';
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  EXPECT_TRUE(entry1 != entry2) << "Different lengths";
  EXPECT_EQ(entry1->GetKey(), key1);
  EXPECT_EQ(entry2->GetKey(), key2);
  entry1->Close();
  entry2->Close();
}
563 
// Key handling on the default (blockfile) backend.
TEST_F(DiskCacheBackendTest, Keying) {
  BackendKeying();
}
567 
// Key handling with the blockfile backend's newer eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
  SetNewEviction();
  BackendKeying();
}
572 
// Key handling on the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
  SetMemoryOnlyMode();
  BackendKeying();
}
577 
// Key handling in APP_CACHE mode.
TEST_F(DiskCacheBackendTest, AppCacheKeying) {
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}
582 
// Key handling in SHADER_CACHE mode.
TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
  SetCacheType(net::SHADER_CACHE);
  BackendKeying();
}
587 
// Verifies that backends can be created both via the private factory method
// (MemBackendImpl::CreateBackend) and via the public CreateCacheBackend API,
// for both disk and memory cache types.
TEST_F(DiskCacheTest, CreateBackend) {
  TestBackendResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    // Test the private factory method(s).
    std::unique_ptr<disk_cache::Backend> cache;
    cache = disk_cache::MemBackendImpl::CreateBackend(0, nullptr);
    ASSERT_TRUE(cache.get());
    cache.reset();

    // Now test the public API.

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT,
        /*file_operations=*/nullptr, cache_path_, 0,
        disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
    ASSERT_TRUE(rv.backend);
    rv.backend.reset();

    rv = disk_cache::CreateCacheBackend(
        net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT,
        /*file_operations=*/nullptr, base::FilePath(), 0,
        disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
    ASSERT_TRUE(rv.backend);
    rv.backend.reset();
  }

  // Let any deferred cleanup posted by backend destruction run.
  base::RunLoop().RunUntilIdle();
}
623 
// Verifies that the post-cleanup callback passed to CreateCacheBackend for a
// memory backend runs only after the backend has been destroyed — and is
// posted, not invoked synchronously from the destructor.
TEST_F(DiskCacheTest, MemBackendPostCleanupCallback) {
  TestBackendResultCompletionCallback cb;

  net::TestClosure on_cleanup;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT,
      /*file_operations=*/nullptr, base::FilePath(), 0,
      disk_cache::ResetHandling::kNeverReset, nullptr, on_cleanup.closure(),
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
  // The callback should be posted after backend is destroyed.
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(on_cleanup.have_result());

  rv.backend.reset();

  // Destroyed, but the cleanup closure must still be pending (posted).
  EXPECT_FALSE(on_cleanup.have_result());
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(on_cleanup.have_result());
}
647 
// Verifies that creating a second backend for the same path is sequenced
// after the first backend for that path has been destroyed.
TEST_F(DiskCacheTest, CreateBackendDouble) {
  // Make sure that creation for the second backend for same path happens
  // after the first one completes.
  TestBackendResultCompletionCallback cb, cb2;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());

  disk_cache::BackendResult rv2 = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb2.callback());

  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  EXPECT_TRUE(rv.backend);
  disk_cache::FlushCacheThreadForTesting();

  // No rv2.backend yet: the second creation must be blocked on the first
  // backend still being alive.
  EXPECT_EQ(net::ERR_IO_PENDING, rv2.net_error);
  EXPECT_FALSE(rv2.backend);
  EXPECT_FALSE(cb2.have_result());

  rv.backend.reset();

  // Now rv2.backend should exist.
  rv2 = cb2.GetResult(std::move(rv2));
  EXPECT_THAT(rv2.net_error, IsOk());
  EXPECT_TRUE(rv2.backend);
}
680 
// Verifies that a second backend for the same path is not created until the
// first backend is destroyed AND all of its entries are closed.
TEST_F(DiskCacheBackendTest, CreateBackendDoubleOpenEntry) {
  // Demonstrate the creation sequencing with an open entry. This is done
  // with SimpleCache since the block-file cache cancels most of I/O on
  // destruction and blocks for what it can't cancel.

  // Don't try to sanity-check things as a blockfile cache
  SetSimpleCacheMode();

  // Make sure that creation for the second backend for same path happens
  // after the first one completes, and all of its ops complete.
  TestBackendResultCompletionCallback cb, cb2;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());

  disk_cache::BackendResult rv2 = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb2.callback());

  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
  disk_cache::FlushCacheThreadForTesting();

  // No cache 2 yet.
  EXPECT_EQ(net::ERR_IO_PENDING, rv2.net_error);
  EXPECT_FALSE(rv2.backend);
  EXPECT_FALSE(cb2.have_result());

  // Keep an entry open across the first backend's destruction.
  TestEntryResultCompletionCallback cb3;
  EntryResult entry_result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb3.callback());
  entry_result = cb3.GetResult(std::move(entry_result));
  ASSERT_EQ(net::OK, entry_result.net_error());

  rv.backend.reset();

  // Still doesn't exist: the open entry keeps the first cache's cleanup
  // from completing.
  EXPECT_FALSE(cb2.have_result());

  entry_result.ReleaseEntry()->Close();

  // Now should exist.
  rv2 = cb2.GetResult(std::move(rv2));
  EXPECT_THAT(rv2.net_error, IsOk());
  EXPECT_TRUE(rv2.backend);
}
731 
// Verifies the explicit PostCleanupCallback parameter to CreateCacheBackend:
// once the callback runs, all writes (including the async stream-0 close)
// must have reached disk.
TEST_F(DiskCacheBackendTest, CreateBackendPostCleanup) {
  // Test for the explicit PostCleanupCallback parameter to CreateCacheBackend.

  // Extravagant size payload to make reproducing races easier.
  const int kBufSize = 256 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  CleanupCacheDir();

  base::RunLoop run_loop;
  TestBackendResultCompletionCallback cb;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, run_loop.QuitClosure(), cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  TestEntryResultCompletionCallback cb2;
  EntryResult result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  rv.backend.reset();

  // Wait till the post-cleanup callback.
  run_loop.Run();

  // All of the payload should be on disk, despite stream 0 being written
  // back in the async Close()
  base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex("key", 0));
  int64_t size = 0;
  EXPECT_TRUE(base::GetFileSize(entry_path, &size));
  EXPECT_GT(size, kBufSize);
}
777 
// Verifies simple-cache index recovery after the index file is deleted: a
// recovered entry must report no trailer prefetch size.
TEST_F(DiskCacheBackendTest, SimpleCreateBackendRecoveryAppCache) {
  // Tests index recovery in APP_CACHE mode. (This is harder to test for
  // DISK_CACHE since post-cleanup callbacks aren't permitted there).
  const int kBufSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  base::RunLoop run_loop;
  TestBackendResultCompletionCallback cb;

  // Create a backend with post-cleanup callback specified, in order to know
  // when the index has been written back (so it can be deleted race-free).
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, run_loop.QuitClosure(), cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  // Create an entry.
  TestEntryResultCompletionCallback cb2;
  disk_cache::EntryResult result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  rv.backend.reset();

  // Wait till the post-cleanup callback.
  run_loop.Run();

  // Delete the index.
  base::DeleteFile(
      cache_path_.AppendASCII("index-dir").AppendASCII("the-real-index"));

  // Open the cache again. The fixture also waits for index init.
  InitCache();

  // Entry should not have a trailer size, since can't tell what it should be
  // when doing recovery (and definitely shouldn't interpret last use time as
  // such).
  EXPECT_EQ(0, simple_cache_impl_->index()->GetTrailerPrefetchSize(
                   disk_cache::simple_util::GetEntryHashKey("key")));
}
832 
// Tests that |BackendImpl| fails to initialize with a missing file.
TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  // Remove one of the block files the copied cache references, so Init() has
  // to detect the inconsistency and fail.
  base::FilePath filename = cache_path_.AppendASCII("data_1");
  base::DeleteFile(filename);
  net::TestCompletionCallback cb;

  // Blocking shouldn't be needed to create the cache.
  absl::optional<base::ScopedDisallowBlocking> disallow_blocking(
      absl::in_place);
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
                                                net::DISK_CACHE, nullptr));
  cache->Init(cb.callback());
  EXPECT_THAT(cb.WaitForResult(), IsError(net::ERR_FAILED));
  disallow_blocking.reset();

  cache.reset();
  // The on-disk state is intentionally broken; skip the fixture's integrity
  // verification at teardown.
  DisableIntegrityCheck();
}
853 
TEST_F(DiskCacheBackendTest, MemoryListensToMemoryPressure) {
  const int kLimit = 16 * 1024;
  const int kEntrySize = 256;
  SetMaxSize(kLimit);
  SetMemoryOnlyMode();
  InitCache();

  // Fill in to about 80-90% full.
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kEntrySize);
  CacheTestFillBuffer(buffer->data(), kEntrySize, false);

  for (int i = 0; i < 0.9 * (kLimit / kEntrySize); ++i) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_EQ(net::OK, CreateEntry(base::NumberToString(i), &entry));
    EXPECT_EQ(kEntrySize,
              WriteData(entry, 0, 0, buffer.get(), kEntrySize, true));
    entry->Close();
  }

  EXPECT_GT(CalculateSizeOfAllEntries(), 0.8 * kLimit);

  // Signal low-memory of various sorts, and see how small it gets.
  // Moderate pressure should shrink the cache below half its budget. The
  // RunUntilIdle() lets the pressure notification reach the backend.
  base::MemoryPressureListener::NotifyMemoryPressure(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
  base::RunLoop().RunUntilIdle();
  EXPECT_LT(CalculateSizeOfAllEntries(), 0.5 * kLimit);

  // Critical pressure should shrink it below a tenth of the budget.
  base::MemoryPressureListener::NotifyMemoryPressure(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
  base::RunLoop().RunUntilIdle();
  EXPECT_LT(CalculateSizeOfAllEntries(), 0.1 * kLimit);
}
887 
TEST_F(DiskCacheBackendTest, ExternalFiles) {
  InitCache();

  // Drop a regular file into the cache directory, at a name the backend uses
  // for external data files.
  base::FilePath external_path = cache_path_.AppendASCII("f_000001");

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> original =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(original->data(), kSize, false);
  ASSERT_TRUE(base::WriteFile(
      external_path,
      base::StringPiece(original->data(), static_cast<size_t>(kSize))));

  // Make the cache create an external file of its own: a zero-byte write at a
  // large offset forces the entry's data out of block files.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 20000, original.get(), 0, false));
  entry->Close();

  // The pre-existing file must be untouched: read it back and compare.
  scoped_refptr<net::IOBuffer> read_back =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  ASSERT_EQ(kSize, base::ReadFile(external_path, read_back->data(), kSize));
  EXPECT_EQ(0, memcmp(original->data(), read_back->data(), kSize));
}
913 
// Tests that we deal with file-level pending operations at destruction time.
// |fast| controls the backend flags and, with them, which of the expectations
// below applies once the backend is destroyed mid-operation.
void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
  ASSERT_TRUE(CleanupCacheDir());
  uint32_t flags = disk_cache::kNoBuffering;
  if (!fast)
    flags |= disk_cache::kNoRandom;

  if (!simple_cache_mode_)
    UseCurrentThread();
  CreateBackend(flags);

  // Start an I/O operation that is expected to still be in flight when the
  // backend goes away.
  net::TestCompletionCallback cb;
  int rv = GeneratePendingIO(&cb);

  // The cache destructor will see one pending operation here.
  ResetCaches();

  if (rv == net::ERR_IO_PENDING) {
    if (fast || simple_cache_mode_)
      EXPECT_FALSE(cb.have_result());
    else
      EXPECT_TRUE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();

#if !BUILDFLAG(IS_IOS)
  // Wait for the actual operation to complete, or we'll keep a file handle that
  // may cause issues later. Note that on iOS systems even though this test
  // uses a single thread, the actual IO is posted to a worker thread and the
  // cache destructor breaks the link to reach cb when the operation completes.
  rv = cb.GetResult(rv);
#endif
}
948 
// Non-fast variant: shutdown is expected to complete the pending operation.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
  BackendShutdownWithPendingFileIO(false);
}

// Here and below, tests that simulate crashes are not compiled in LeakSanitizer
// builds because they contain a lot of intentional memory leaks.
#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingFileIO(true);
}
#endif
964 
// See crbug.com/330074
#if !BUILDFLAG(IS_IOS)
// Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
  // A second, independent cache living in its own temp dir.
  base::ScopedTempDir store;
  ASSERT_TRUE(store.CreateUniqueTempDir());

  net::TestCompletionCallback cb;
  TestBackendResultCompletionCallback create_cb;
  disk_cache::BackendResult backend_rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      store.GetPath(), 0, disk_cache::ResetHandling::kNeverReset,
      /* net_log = */ nullptr, create_cb.callback());
  backend_rv = create_cb.GetResult(std::move(backend_rv));
  ASSERT_THAT(backend_rv.net_error, IsOk());
  ASSERT_TRUE(backend_rv.backend);

  // Set up the fixture's own cache with an operation in flight.
  ASSERT_TRUE(CleanupCacheDir());
  SetNewEviction();  // Match the expected behavior for integrity verification.
  UseCurrentThread();

  CreateBackend(disk_cache::kNoBuffering);
  int rv = GeneratePendingIO(&cb);

  // cache_ has a pending operation, and backend_rv.backend will go away.
  backend_rv.backend.reset();

  if (rv == net::ERR_IO_PENDING)
    EXPECT_FALSE(cb.have_result());

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle that
  // may cause issues later.
  rv = cb.GetResult(rv);
}
#endif
1003 
// Tests that we deal with background-thread pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
  TestEntryResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    uint32_t flags = disk_cache::kNoBuffering;
    if (!fast)
      flags |= disk_cache::kNoRandom;

    CreateBackend(flags);

    // Create and close an entry so teardown still has queued backend work.
    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb.callback());
    result = cb.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());

    result.ReleaseEntry()->Close();

    // The cache destructor will see one pending operation here.
    ResetCaches();
  }

  base::RunLoop().RunUntilIdle();
  // Even after pumping the loop, no stale callback may fire post-shutdown.
  EXPECT_FALSE(cb.have_result());
}
1031 
// Non-fast variant of the pending-IO shutdown scenario.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
  BackendShutdownWithPendingIO(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingIO(true);
}
#endif
1045 
// Tests that we deal with create-type pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
  TestEntryResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    disk_cache::BackendFlags flags =
        fast ? disk_cache::kNone : disk_cache::kNoRandom;
    CreateBackend(flags);

    // Start a create but deliberately do NOT wait for it to complete.
    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb.callback());
    ASSERT_THAT(result.net_error(), IsError(net::ERR_IO_PENDING));

    // Tear the backend down while the create is still in flight.
    ResetCaches();
    EXPECT_FALSE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();
  // The abandoned create's callback must never run after shutdown.
  EXPECT_FALSE(cb.have_result());
}
1068 
// Non-fast variant of the pending-create shutdown scenario.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
  BackendShutdownWithPendingCreate(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking an entry from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingCreate(true);
}
#endif
1082 
// Tests shutting the backend down while a doom operation is still pending.
void DiskCacheBackendTest::BackendShutdownWithPendingDoom() {
  net::TestCompletionCallback cb;
  {
    ASSERT_TRUE(CleanupCacheDir());

    disk_cache::BackendFlags flags = disk_cache::kNoRandom;
    CreateBackend(flags);

    // Create and close an entry that can then be doomed.
    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();

    // Start the doom, then shut down before it can complete.
    int rv = cache_->DoomEntry("some key", net::HIGHEST, cb.callback());
    ASSERT_THAT(rv, IsError(net::ERR_IO_PENDING));

    ResetCaches();
    EXPECT_FALSE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();
  // The doom callback must not run after the backend is gone.
  EXPECT_FALSE(cb.have_result());
}
1108 
// Runs the pending-doom shutdown scenario on the default backend.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingDoom) {
  BackendShutdownWithPendingDoom();
}
1112 
1113 // Disabled on android since this test requires cache creator to create
1114 // blockfile caches.
1115 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheTest,TruncatedIndex)1116 TEST_F(DiskCacheTest, TruncatedIndex) {
1117   ASSERT_TRUE(CleanupCacheDir());
1118   base::FilePath index = cache_path_.AppendASCII("index");
1119   ASSERT_TRUE(base::WriteFile(index, "hello"));
1120 
1121   TestBackendResultCompletionCallback cb;
1122 
1123   disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
1124       net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
1125       /*file_operations=*/nullptr, cache_path_, 0,
1126       disk_cache::ResetHandling::kNeverReset, /*net_log=*/nullptr,
1127       cb.callback());
1128   rv = cb.GetResult(std::move(rv));
1129   ASSERT_NE(net::OK, rv.net_error);
1130   ASSERT_FALSE(rv.backend);
1131 }
1132 #endif
1133 
// Exercises max-size enforcement: per-file size limits, growing the budget,
// and eviction of the oldest entry when the budget shrinks again.
void DiskCacheBackendTest::BackendSetSize() {
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(cache_size);
  memset(buffer->data(), 0, cache_size);
  // A write of 1/10 of the budget is accepted...
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
      << "normal file";

  // ...but 1/5 of the budget in a single stream is rejected outright.
  EXPECT_EQ(net::ERR_FAILED,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));

  // Let's fill up the cache!.
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4,
            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
  entry->Close();
  FlushQueueForTest();

  // Shrink the budget back down, setting up the trim below.
  SetMaxSize(cache_size);

  // The cache is 95% full.

  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry("an extra key", &entry2), IsOk());
  EXPECT_EQ(cache_size / 10,
            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
  entry2->Close();  // This will trigger the cache trim.

  // |first| was written earliest and closed, so the trim should evict it.
  EXPECT_NE(net::OK, OpenEntry(first, &entry2));

  FlushQueueForTest();  // Make sure that we are done trimming the cache.
  FlushQueueForTest();  // We may have posted two tasks to evict stuff.

  entry->Close();
  ASSERT_THAT(OpenEntry(second, &entry), IsOk());
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
  entry->Close();
}
1191 
// The SetSize scenario on the default blockfile backend.
TEST_F(DiskCacheBackendTest, SetSize) {
  BackendSetSize();
}

// Same scenario under the "new eviction" policy.
TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
  SetNewEviction();
  BackendSetSize();
}

// Same scenario with the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
  SetMemoryOnlyMode();
  BackendSetSize();
}
1205 
BackendLoad()1206 void DiskCacheBackendTest::BackendLoad() {
1207   InitCache();
1208   int seed = static_cast<int>(Time::Now().ToInternalValue());
1209   srand(seed);
1210 
1211   disk_cache::Entry* entries[kLargeNumEntries];
1212   for (auto*& entry : entries) {
1213     std::string key = GenerateKey(true);
1214     ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1215   }
1216   EXPECT_EQ(kLargeNumEntries, cache_->GetEntryCount());
1217 
1218   for (int i = 0; i < kLargeNumEntries; i++) {
1219     int source1 = rand() % kLargeNumEntries;
1220     int source2 = rand() % kLargeNumEntries;
1221     disk_cache::Entry* temp = entries[source1];
1222     entries[source1] = entries[source2];
1223     entries[source2] = temp;
1224   }
1225 
1226   for (auto* entry : entries) {
1227     disk_cache::Entry* new_entry;
1228     ASSERT_THAT(OpenEntry(entry->GetKey(), &new_entry), IsOk());
1229     EXPECT_TRUE(new_entry == entry);
1230     new_entry->Close();
1231     entry->Doom();
1232     entry->Close();
1233   }
1234   FlushQueueForTest();
1235   EXPECT_EQ(0, cache_->GetEntryCount());
1236 }
1237 
// The Load scenario across eviction policies, backends, and cache types.
TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  SetNewEviction();
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1274 
1275 // Tests the chaining of an entry to the current head.
BackendChain()1276 void DiskCacheBackendTest::BackendChain() {
1277   SetMask(0x1);        // 2-entry table.
1278   SetMaxSize(0x3000);  // 12 kB.
1279   InitCache();
1280 
1281   disk_cache::Entry* entry;
1282   ASSERT_THAT(CreateEntry("The first key", &entry), IsOk());
1283   entry->Close();
1284   ASSERT_THAT(CreateEntry("The Second key", &entry), IsOk());
1285   entry->Close();
1286 }
1287 
// The Chain scenario across eviction policies and cache types.
TEST_F(DiskCacheBackendTest, Chain) {
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionChain) {
  SetNewEviction();
  BackendChain();
}

TEST_F(DiskCacheBackendTest, AppCacheChain) {
  SetCacheType(net::APP_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  SetCacheType(net::SHADER_CACHE);
  BackendChain();
}
1306 
// Verifies which list the new-eviction policy trims from first.
TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
  SetNewEviction();
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_THAT(CreateEntry(name, &entry), IsOk());
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      // (Re-opening promotes an entry from list 0 to list 1.)
      ASSERT_THAT(OpenEntry(name, &entry), IsOk());
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_THAT(OpenEntry("Key 1", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry("Key 91", &entry), IsOk());
  entry->Close();
}
1336 
1337 // Before looking for invalid entries, let's check a valid entry.
BackendValidEntry()1338 void DiskCacheBackendTest::BackendValidEntry() {
1339   InitCache();
1340 
1341   std::string key("Some key");
1342   disk_cache::Entry* entry;
1343   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1344 
1345   const int kSize = 50;
1346   scoped_refptr<net::IOBuffer> buffer1 =
1347       base::MakeRefCounted<net::IOBuffer>(kSize);
1348   memset(buffer1->data(), 0, kSize);
1349   base::strlcpy(buffer1->data(), "And the data to save", kSize);
1350   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
1351   entry->Close();
1352   SimulateCrash();
1353 
1354   ASSERT_THAT(OpenEntry(key, &entry), IsOk());
1355 
1356   scoped_refptr<net::IOBuffer> buffer2 =
1357       base::MakeRefCounted<net::IOBuffer>(kSize);
1358   memset(buffer2->data(), 0, kSize);
1359   EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
1360   entry->Close();
1361   EXPECT_STREQ(buffer1->data(), buffer2->data());
1362 }
1363 
// The ValidEntry scenario under both eviction policies.
TEST_F(DiskCacheBackendTest, ValidEntry) {
  BackendValidEntry();
}

TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
  SetNewEviction();
  BackendValidEntry();
}
1372 
// The same logic of the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  // Deliberately no Close() before the crash: that's what leaves the entry
  // dirty (and leaks it).
  SimulateCrash();

  // A dirty entry must be dropped on the next open, leaving the cache empty.
  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache_->GetEntryCount());
}
1394 
#if !defined(LEAK_SANITIZER)
// The InvalidEntry scenario across eviction policies and cache types.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  SetNewEviction();
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
}
1418 
// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  // Reopen and read, then crash while the entry is still open for reading.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    EXPECT_EQ(1, cache_->GetEntryCount());
    entry->Close();
  } else {
    // For other cache types, the entry open at crash time is treated as dirty
    // and discarded.
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}
1450 
// The InvalidEntryRead scenario across eviction policies and cache types.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  SetNewEviction();
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}
1473 
// Crash with many entries in flight: half cleanly closed, half left open.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (auto*& entry : entries) {
    std::string key = GenerateKey(true);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Shuffle so open and closed entries are interleaved across the table.
  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  // Record every key, then close the first half; the second half stays open
  // across the crash.
  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  // Entries open at crash time are dirty and must be gone.
  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  // Cleanly-closed entries must survive.
  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_THAT(OpenEntry(keys[i], &entry), IsOk());
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}
1522 
// The InvalidEntryWithLoad scenario across eviction policies and cache types.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}
1545 
// Trimming must discard a dirty (crashed-while-open) entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash. |first| is still open, so it becomes dirty.
  SimulateCrash();

  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());
  // Shrink the budget so closing |second| forces a trim.
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20mS, we have one entry in the cache;
  // if it took more than that, we posted a task and we'll delete the second
  // entry too.
  base::RunLoop().RunUntilIdle();

  // This may be not thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  // Whatever the timing, the dirty entry must be gone.
  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}
1587 
// The TrimInvalidEntry scenario under both eviction policies.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  BackendTrimInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  SetNewEviction();
  BackendTrimInvalidEntry();
}
1598 
// Trims a cache full of chained, dirty entries after a simulated crash.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    // Note that we are not closing the entries.
  }

  // Simulate a crash. All 32 entries are still open, so they become dirty.
  SimulateCrash();

  ASSERT_THAT(CreateEntry("Something else", &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache_->GetEntryCount());
  SetMaxSize(kSize);

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easy.
  if (new_eviction_) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
  }

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::RunLoop().RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();

  // For extra messiness, the integrity check for the cache can actually cause
  // evictions if it's over-capacity, which would race with above. So change the
  // size we pass to CheckCacheIntegrity (but don't mess with existing backend's
  // state.
  size_ = 0;
}
1656 
// Runs BackendTrimInvalidEntry2 with the default eviction policy.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  BackendTrimInvalidEntry2();
}

// Same scenario with the new eviction policy enabled.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  SetNewEviction();
  BackendTrimInvalidEntry2();
}
1667 #endif  // !defined(LEAK_SANITIZER)
1668 
BackendEnumerations()1669 void DiskCacheBackendTest::BackendEnumerations() {
1670   InitCache();
1671   Time initial = Time::Now();
1672 
1673   const int kNumEntries = 100;
1674   for (int i = 0; i < kNumEntries; i++) {
1675     std::string key = GenerateKey(true);
1676     disk_cache::Entry* entry;
1677     ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1678     entry->Close();
1679   }
1680   EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1681   Time final = Time::Now();
1682 
1683   disk_cache::Entry* entry;
1684   std::unique_ptr<TestIterator> iter = CreateIterator();
1685   int count = 0;
1686   Time last_modified[kNumEntries];
1687   Time last_used[kNumEntries];
1688   while (iter->OpenNextEntry(&entry) == net::OK) {
1689     ASSERT_TRUE(nullptr != entry);
1690     if (count < kNumEntries) {
1691       last_modified[count] = entry->GetLastModified();
1692       last_used[count] = entry->GetLastUsed();
1693       EXPECT_TRUE(initial <= last_modified[count]);
1694       EXPECT_TRUE(final >= last_modified[count]);
1695     }
1696 
1697     entry->Close();
1698     count++;
1699   };
1700   EXPECT_EQ(kNumEntries, count);
1701 
1702   iter = CreateIterator();
1703   count = 0;
1704   // The previous enumeration should not have changed the timestamps.
1705   while (iter->OpenNextEntry(&entry) == net::OK) {
1706     ASSERT_TRUE(nullptr != entry);
1707     if (count < kNumEntries) {
1708       EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1709       EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1710     }
1711     entry->Close();
1712     count++;
1713   };
1714   EXPECT_EQ(kNumEntries, count);
1715 }
1716 
// The variants below run BackendEnumerations() against different backend
// configurations.
TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  SetNewEviction();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
}
1740 
1741 // Verifies enumerations while entries are open.
BackendEnumerations2()1742 void DiskCacheBackendTest::BackendEnumerations2() {
1743   InitCache();
1744   const std::string first("first");
1745   const std::string second("second");
1746   disk_cache::Entry *entry1, *entry2;
1747   ASSERT_THAT(CreateEntry(first, &entry1), IsOk());
1748   entry1->Close();
1749   ASSERT_THAT(CreateEntry(second, &entry2), IsOk());
1750   entry2->Close();
1751   FlushQueueForTest();
1752 
1753   // Make sure that the timestamp is not the same.
1754   AddDelay();
1755   ASSERT_THAT(OpenEntry(second, &entry1), IsOk());
1756   std::unique_ptr<TestIterator> iter = CreateIterator();
1757   ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
1758   EXPECT_EQ(entry2->GetKey(), second);
1759 
1760   // Two entries and the iterator pointing at "first".
1761   entry1->Close();
1762   entry2->Close();
1763 
1764   // The iterator should still be valid, so we should not crash.
1765   ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
1766   EXPECT_EQ(entry2->GetKey(), first);
1767   entry2->Close();
1768   iter = CreateIterator();
1769 
1770   // Modify the oldest entry and get the newest element.
1771   ASSERT_THAT(OpenEntry(first, &entry1), IsOk());
1772   EXPECT_EQ(0, WriteData(entry1, 0, 200, nullptr, 0, false));
1773   ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
1774   if (type_ == net::APP_CACHE) {
1775     // The list is not updated.
1776     EXPECT_EQ(entry2->GetKey(), second);
1777   } else {
1778     EXPECT_EQ(entry2->GetKey(), first);
1779   }
1780 
1781   entry1->Close();
1782   entry2->Close();
1783 }
1784 
// The variants below run BackendEnumerations2() against different backend
// configurations.
TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  SetNewEviction();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
}
1803 
BackendDoomMidEnumeration()1804 void DiskCacheBackendTest::BackendDoomMidEnumeration() {
1805   InitCache();
1806 
1807   const int kNumEntries = 100;
1808   std::set<std::string> keys;
1809   for (int i = 0; i < kNumEntries; i++) {
1810     std::string key = GenerateKey(true);
1811     keys.insert(key);
1812     disk_cache::Entry* entry;
1813     ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1814     entry->Close();
1815   }
1816 
1817   disk_cache::Entry* entry;
1818   std::unique_ptr<TestIterator> iter = CreateIterator();
1819   int count = 0;
1820   while (iter->OpenNextEntry(&entry) == net::OK) {
1821     if (count == 0) {
1822       // Delete a random entry from the cache while in the midst of iteration.
1823       auto key_to_doom = keys.begin();
1824       while (*key_to_doom == entry->GetKey())
1825         key_to_doom++;
1826       ASSERT_THAT(DoomEntry(*key_to_doom), IsOk());
1827       ASSERT_EQ(1u, keys.erase(*key_to_doom));
1828     }
1829     ASSERT_NE(nullptr, entry);
1830     EXPECT_EQ(1u, keys.erase(entry->GetKey()));
1831     entry->Close();
1832     count++;
1833   };
1834 
1835   EXPECT_EQ(kNumEntries - 1, cache_->GetEntryCount());
1836   EXPECT_EQ(0u, keys.size());
1837 }
1838 
// The variants below make sure every backend flavor survives an entry being
// doomed in the middle of an enumeration.
TEST_F(DiskCacheBackendTest, DoomEnumerations) {
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomEnumerations) {
  SetNewEviction();
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEnumerations) {
  SetMemoryOnlyMode();
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, ShaderCacheDoomEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, AppCacheDoomEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, SimpleDoomEnumerations) {
  SetSimpleCacheMode();
  BackendDoomMidEnumeration();
}
1867 
1868 // Verify that ReadData calls do not update the LRU cache
1869 // when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest,ShaderCacheEnumerationReadData)1870 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
1871   SetCacheType(net::SHADER_CACHE);
1872   InitCache();
1873   const std::string first("first");
1874   const std::string second("second");
1875   disk_cache::Entry *entry1, *entry2;
1876   const int kSize = 50;
1877   scoped_refptr<net::IOBuffer> buffer1 =
1878       base::MakeRefCounted<net::IOBuffer>(kSize);
1879 
1880   ASSERT_THAT(CreateEntry(first, &entry1), IsOk());
1881   memset(buffer1->data(), 0, kSize);
1882   base::strlcpy(buffer1->data(), "And the data to save", kSize);
1883   EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1884 
1885   ASSERT_THAT(CreateEntry(second, &entry2), IsOk());
1886   entry2->Close();
1887 
1888   FlushQueueForTest();
1889 
1890   // Make sure that the timestamp is not the same.
1891   AddDelay();
1892 
1893   // Read from the last item in the LRU.
1894   EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1895   entry1->Close();
1896 
1897   std::unique_ptr<TestIterator> iter = CreateIterator();
1898   ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
1899   EXPECT_EQ(entry2->GetKey(), second);
1900   entry2->Close();
1901 }
1902 
1903 #if !defined(LEAK_SANITIZER)
1904 // Verify handling of invalid entries while doing enumerations.
1905 // We'll be leaking memory from this test.
BackendInvalidEntryEnumeration()1906 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1907   InitCache();
1908 
1909   std::string key("Some key");
1910   disk_cache::Entry *entry, *entry1, *entry2;
1911   ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
1912 
1913   const int kSize = 50;
1914   scoped_refptr<net::IOBuffer> buffer1 =
1915       base::MakeRefCounted<net::IOBuffer>(kSize);
1916   memset(buffer1->data(), 0, kSize);
1917   base::strlcpy(buffer1->data(), "And the data to save", kSize);
1918   EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1919   entry1->Close();
1920   ASSERT_THAT(OpenEntry(key, &entry1), IsOk());
1921   EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1922 
1923   std::string key2("Another key");
1924   ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
1925   entry2->Close();
1926   ASSERT_EQ(2, cache_->GetEntryCount());
1927 
1928   SimulateCrash();
1929 
1930   std::unique_ptr<TestIterator> iter = CreateIterator();
1931   int count = 0;
1932   while (iter->OpenNextEntry(&entry) == net::OK) {
1933     ASSERT_TRUE(nullptr != entry);
1934     EXPECT_EQ(key2, entry->GetKey());
1935     entry->Close();
1936     count++;
1937   };
1938   EXPECT_EQ(1, count);
1939   EXPECT_EQ(1, cache_->GetEntryCount());
1940 }
1941 
// Runs BackendInvalidEntryEnumeration with the default eviction policy.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
}

// Same scenario with the new eviction policy enabled.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  SetNewEviction();
  BackendInvalidEntryEnumeration();
}
1952 #endif  // !defined(LEAK_SANITIZER)
1953 
// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  // Seed rand() with the current time; presumably consumed by GenerateKey()
  // below -- TODO confirm against the helper's definition.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Two independent iterators over the same cache.
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter1 = CreateIterator(),
                                iter2 = CreateIterator();
  ASSERT_THAT(iter1->OpenNextEntry(&entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  // Let's go to the middle of the list. |iter1| stays one step ahead of
  // |iter2| and keeps its current entry open.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_THAT(iter1->OpenNextEntry(&entry1), IsOk());
    ASSERT_TRUE(nullptr != entry1);

    ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
    ASSERT_TRUE(nullptr != entry2);
    entry2->Close();
  }

  // Messing up with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);

  // The link entry2->entry1 should be broken: the doomed entry must not be
  // the one the second iterator returns next.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);
  entry2->Close();
}
2006 
// Runs BackendFixEnumerators with the default and new eviction policies.
TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}
2015 
BackendDoomRecent()2016 void DiskCacheBackendTest::BackendDoomRecent() {
2017   InitCache();
2018 
2019   disk_cache::Entry* entry;
2020   ASSERT_THAT(CreateEntry("first", &entry), IsOk());
2021   entry->Close();
2022   ASSERT_THAT(CreateEntry("second", &entry), IsOk());
2023   entry->Close();
2024   FlushQueueForTest();
2025 
2026   AddDelay();
2027   Time middle = Time::Now();
2028 
2029   ASSERT_THAT(CreateEntry("third", &entry), IsOk());
2030   entry->Close();
2031   ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
2032   entry->Close();
2033   FlushQueueForTest();
2034 
2035   AddDelay();
2036   Time final = Time::Now();
2037 
2038   ASSERT_EQ(4, cache_->GetEntryCount());
2039   EXPECT_THAT(DoomEntriesSince(final), IsOk());
2040   ASSERT_EQ(4, cache_->GetEntryCount());
2041 
2042   EXPECT_THAT(DoomEntriesSince(middle), IsOk());
2043   ASSERT_EQ(2, cache_->GetEntryCount());
2044 
2045   ASSERT_THAT(OpenEntry("second", &entry), IsOk());
2046   entry->Close();
2047 }
2048 
// The variants below run BackendDoomRecent() against different backends.
TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}

// Dooming everything since the sparse cache was set up leaves one entry.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, nullptr);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, nullptr);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

// DoomAllEntries() must clear sparse entries too.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(nullptr, nullptr);
  EXPECT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(nullptr, nullptr);
  EXPECT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
}
2093 
2094 // This test is for https://crbug.com/827492.
TEST_F(DiskCacheBackendTest,InMemorySparseEvict)2095 TEST_F(DiskCacheBackendTest, InMemorySparseEvict) {
2096   const int kMaxSize = 512;
2097 
2098   SetMaxSize(kMaxSize);
2099   SetMemoryOnlyMode();
2100   InitCache();
2101 
2102   scoped_refptr<net::IOBuffer> buffer = base::MakeRefCounted<net::IOBuffer>(64);
2103   CacheTestFillBuffer(buffer->data(), 64, false /* no_nulls */);
2104 
2105   std::vector<disk_cache::ScopedEntryPtr> entries;
2106 
2107   disk_cache::Entry* entry = nullptr;
2108   // Create a bunch of entries
2109   for (size_t i = 0; i < 14; i++) {
2110     std::string name = "http://www." + base::NumberToString(i) + ".com/";
2111     ASSERT_THAT(CreateEntry(name, &entry), IsOk());
2112     entries.push_back(disk_cache::ScopedEntryPtr(entry));
2113   }
2114 
2115   // Create several sparse entries and fill with enough data to
2116   // pass eviction threshold
2117   ASSERT_EQ(64, WriteSparseData(entries[0].get(), 0, buffer.get(), 64));
2118   ASSERT_EQ(net::ERR_FAILED,
2119             WriteSparseData(entries[0].get(), 10000, buffer.get(), 4));
2120   ASSERT_EQ(63, WriteSparseData(entries[1].get(), 0, buffer.get(), 63));
2121   ASSERT_EQ(64, WriteSparseData(entries[2].get(), 0, buffer.get(), 64));
2122   ASSERT_EQ(64, WriteSparseData(entries[3].get(), 0, buffer.get(), 64));
2123 
2124   // Close all the entries, leaving a populated LRU list
2125   // with all entries having refcount 0 (doom implies deletion)
2126   entries.clear();
2127 
2128   // Create a new entry, triggering buggy eviction
2129   ASSERT_THAT(CreateEntry("http://www.14.com/", &entry), IsOk());
2130   entry->Close();
2131 }
2132 
BackendDoomBetween()2133 void DiskCacheBackendTest::BackendDoomBetween() {
2134   InitCache();
2135 
2136   disk_cache::Entry* entry;
2137   ASSERT_THAT(CreateEntry("first", &entry), IsOk());
2138   entry->Close();
2139   FlushQueueForTest();
2140 
2141   AddDelay();
2142   Time middle_start = Time::Now();
2143 
2144   ASSERT_THAT(CreateEntry("second", &entry), IsOk());
2145   entry->Close();
2146   ASSERT_THAT(CreateEntry("third", &entry), IsOk());
2147   entry->Close();
2148   FlushQueueForTest();
2149 
2150   AddDelay();
2151   Time middle_end = Time::Now();
2152 
2153   ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
2154   entry->Close();
2155   ASSERT_THAT(OpenEntry("fourth", &entry), IsOk());
2156   entry->Close();
2157   FlushQueueForTest();
2158 
2159   AddDelay();
2160   Time final = Time::Now();
2161 
2162   ASSERT_EQ(4, cache_->GetEntryCount());
2163   EXPECT_THAT(DoomEntriesBetween(middle_start, middle_end), IsOk());
2164   ASSERT_EQ(2, cache_->GetEntryCount());
2165 
2166   ASSERT_THAT(OpenEntry("fourth", &entry), IsOk());
2167   entry->Close();
2168 
2169   EXPECT_THAT(DoomEntriesBetween(middle_start, final), IsOk());
2170   ASSERT_EQ(1, cache_->GetEntryCount());
2171 
2172   ASSERT_THAT(OpenEntry("first", &entry), IsOk());
2173   entry->Close();
2174 }
2175 
// The variants below run BackendDoomBetween() against different backends.
TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}

// Sparse variants: doom the recorded [start, end) window first, then
// everything up to now, and check the surviving entry counts.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

// Expected counts are larger here because BackendImpl counts child entries
// in GetEntryCount() while MemBackendImpl does not.
TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}
2214 
BackendCalculateSizeOfAllEntries()2215 void DiskCacheBackendTest::BackendCalculateSizeOfAllEntries() {
2216   InitCache();
2217 
2218   // The cache is initially empty.
2219   EXPECT_EQ(0, CalculateSizeOfAllEntries());
2220 
2221   // Generate random entries and populate them with data of respective
2222   // sizes 0, 1, ..., count - 1 bytes.
2223   std::set<std::string> key_pool;
2224   CreateSetOfRandomEntries(&key_pool);
2225 
2226   int count = 0;
2227   int total_size = 0;
2228   for (std::string key : key_pool) {
2229     std::string data(count, ' ');
2230     scoped_refptr<net::StringIOBuffer> buffer =
2231         base::MakeRefCounted<net::StringIOBuffer>(data);
2232 
2233     // Alternate between writing to first two streams to test that we do not
2234     // take only one stream into account.
2235     disk_cache::Entry* entry;
2236     ASSERT_THAT(OpenEntry(key, &entry), IsOk());
2237     ASSERT_EQ(count, WriteData(entry, count % 2, 0, buffer.get(), count, true));
2238     entry->Close();
2239 
2240     total_size += GetRoundedSize(count + GetEntryMetadataSize(key));
2241     ++count;
2242   }
2243 
2244   int result = CalculateSizeOfAllEntries();
2245   EXPECT_EQ(total_size, result);
2246 
2247   // Add another entry and test if the size is updated. Then remove it and test
2248   // if the size is back to original value.
2249   {
2250     const int last_entry_size = 47;
2251     std::string data(last_entry_size, ' ');
2252     scoped_refptr<net::StringIOBuffer> buffer =
2253         base::MakeRefCounted<net::StringIOBuffer>(data);
2254 
2255     disk_cache::Entry* entry;
2256     std::string key = GenerateKey(true);
2257     ASSERT_THAT(CreateEntry(key, &entry), IsOk());
2258     ASSERT_EQ(last_entry_size,
2259               WriteData(entry, 0, 0, buffer.get(), last_entry_size, true));
2260     entry->Close();
2261 
2262     int new_result = CalculateSizeOfAllEntries();
2263     EXPECT_EQ(
2264         result + GetRoundedSize(last_entry_size + GetEntryMetadataSize(key)),
2265         new_result);
2266 
2267     DoomEntry(key);
2268     new_result = CalculateSizeOfAllEntries();
2269     EXPECT_EQ(result, new_result);
2270   }
2271 
2272   // After dooming the entries, the size should be back to zero.
2273   ASSERT_THAT(DoomAllEntries(), IsOk());
2274   EXPECT_EQ(0, CalculateSizeOfAllEntries());
2275 }
2276 
// The variants below run BackendCalculateSizeOfAllEntries() against
// different backends.
TEST_F(DiskCacheBackendTest, CalculateSizeOfAllEntries) {
  BackendCalculateSizeOfAllEntries();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyCalculateSizeOfAllEntries) {
  SetMemoryOnlyMode();
  BackendCalculateSizeOfAllEntries();
}

TEST_F(DiskCacheBackendTest, SimpleCacheCalculateSizeOfAllEntries) {
  // Use net::APP_CACHE to make size estimations deterministic via
  // non-optimistic writes.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendCalculateSizeOfAllEntries();
}
2293 
BackendCalculateSizeOfEntriesBetween(bool expect_access_time_comparisons)2294 void DiskCacheBackendTest::BackendCalculateSizeOfEntriesBetween(
2295     bool expect_access_time_comparisons) {
2296   InitCache();
2297 
2298   EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
2299 
2300   Time start = Time::Now();
2301 
2302   disk_cache::Entry* entry;
2303   ASSERT_THAT(CreateEntry("first", &entry), IsOk());
2304   entry->Close();
2305   FlushQueueForTest();
2306   base::RunLoop().RunUntilIdle();
2307 
2308   AddDelay();
2309   Time middle = Time::Now();
2310   AddDelay();
2311 
2312   ASSERT_THAT(CreateEntry("second", &entry), IsOk());
2313   entry->Close();
2314   ASSERT_THAT(CreateEntry("third_entry", &entry), IsOk());
2315   entry->Close();
2316   FlushQueueForTest();
2317   base::RunLoop().RunUntilIdle();
2318 
2319   AddDelay();
2320   Time end = Time::Now();
2321 
2322   int size_1 = GetRoundedSize(GetEntryMetadataSize("first"));
2323   int size_2 = GetRoundedSize(GetEntryMetadataSize("second"));
2324   int size_3 = GetRoundedSize(GetEntryMetadataSize("third_entry"));
2325 
2326   ASSERT_EQ(3, cache_->GetEntryCount());
2327   ASSERT_EQ(CalculateSizeOfAllEntries(),
2328             CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
2329 
2330   if (expect_access_time_comparisons) {
2331     int start_end = CalculateSizeOfEntriesBetween(start, end);
2332     ASSERT_EQ(CalculateSizeOfAllEntries(), start_end);
2333     ASSERT_EQ(size_1 + size_2 + size_3, start_end);
2334 
2335     ASSERT_EQ(size_1, CalculateSizeOfEntriesBetween(start, middle));
2336     ASSERT_EQ(size_2 + size_3, CalculateSizeOfEntriesBetween(middle, end));
2337   }
2338 
2339   // After dooming the entries, the size should be back to zero.
2340   ASSERT_THAT(DoomAllEntries(), IsOk());
2341   EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
2342 }
2343 
// The default backend reports ERR_NOT_IMPLEMENTED for this query.
TEST_F(DiskCacheBackendTest, CalculateSizeOfEntriesBetween) {
  InitCache();
  ASSERT_EQ(net::ERR_NOT_IMPLEMENTED,
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
}

TEST_F(DiskCacheBackendTest, MemoryOnlyCalculateSizeOfEntriesBetween) {
  SetMemoryOnlyMode();
  BackendCalculateSizeOfEntriesBetween(true);
}

TEST_F(DiskCacheBackendTest, SimpleCacheCalculateSizeOfEntriesBetween) {
  // Test normal mode in where access time range comparisons are supported.
  SetSimpleCacheMode();
  BackendCalculateSizeOfEntriesBetween(true);
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheCalculateSizeOfEntriesBetween) {
  // Test SimpleCache in APP_CACHE mode separately since it does not support
  // access time range comparisons.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendCalculateSizeOfEntriesBetween(false);
}
2368 
// Restores the canned cache snapshot |name| and verifies that initialization
// recovers from the half-completed operation it contains: the snapshot's
// "the first key" entry must not be openable afterwards, the entry count must
// match |num_entries| (give or take one extra dropped entry under |load|),
// and the on-disk structures must pass the integrity check. Sets |success_|
// only on a clean pass so callers can report which snapshot failed.
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries,
                                              bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32_t mask;
  if (load) {
    mask = 0xf;  // Tiny index, so entries chain on the hash buckets.
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  // The entry touched by the interrupted operation must be gone.
  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  ResetCaches();

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, MaxSize(), mask));
  success_ = true;
}
2407 
// Runs the recovery check against snapshots captured in the middle of an
// insert, with an empty cache, a single-entry cache, and a loaded 100-entry
// cache. BackendTransaction() sets |success_| only on a clean pass.
void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";
}
2431 
// Runs BackendRecoverInsert with the default and new eviction policies.
TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}
2440 
// Runs the recovery check against snapshots captured in the middle of a
// remove, covering the only element, the list head, the list tail, and a
// loaded 100-entry cache. BackendTransaction() sets |success_| on a clean
// pass.
void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
}
2480 
// Runs BackendRecoverRemove with the default and new eviction policies;
// both are disabled on Windows, see the crbug below.
#if BUILDFLAG(IS_WIN)
// http://crbug.com/396392
#define MAYBE_RecoverRemove DISABLED_RecoverRemove
#else
#define MAYBE_RecoverRemove RecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
  BackendRecoverRemove();
}

#if BUILDFLAG(IS_WIN)
// http://crbug.com/396392
#define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
#else
#define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}
2501 
// Initializes a cache copied from the "insert_load1" golden files with a very
// small maximum size, so that eviction is triggered during startup recovery.
// The test only requires that initialization completes without crashing;
// integrity checking is disabled because the cache is deliberately damaged.
void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);       // 16-entry table.
  SetMaxSize(0x1000)  // 4 kB; small enough to force eviction on init.
      ;

  // We should not crash here.
  InitCache();
  DisableIntegrityCheck();
}
2514 
TEST_F(DiskCacheBackendTest,RecoverWithEviction)2515 TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
2516   BackendRecoverWithEviction();
2517 }
2518 
// Recovery-under-eviction scenario with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}
2523 
2524 // Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_F(DiskCacheTest,WrongVersion)2525 TEST_F(DiskCacheTest, WrongVersion) {
2526   ASSERT_TRUE(CopyTestCache("wrong_version"));
2527   net::TestCompletionCallback cb;
2528 
2529   std::unique_ptr<disk_cache::BackendImpl> cache(
2530       std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
2531                                                 net::DISK_CACHE, nullptr));
2532   cache->Init(cb.callback());
2533   ASSERT_THAT(cb.WaitForResult(), IsError(net::ERR_FAILED));
2534 }
2535 
2536 // Tests that the disk cache successfully joins the control group, dropping the
2537 // existing cache in favour of a new empty cache.
2538 // Disabled on android since this test requires cache creator to create
2539 // blockfile caches.
2540 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
  // Start from an on-disk blockfile cache that already contains one entry.
  std::unique_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_path_);
  ASSERT_TRUE(cache.get());
  cache.reset();

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");
  TestBackendResultCompletionCallback cb;
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  // Joining the control group drops the previous cache: the new backend
  // starts empty.
  EXPECT_EQ(0, rv.backend->GetEntryCount());
}
2561 #endif
2562 
2563 // Tests that the disk cache can restart in the control group preserving
2564 // existing entries.
TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");

  std::unique_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_path_);
  ASSERT_TRUE(cache.get());

  net::TestCompletionCallback cb;

  // Re-open the same on-disk cache several times; the pre-existing entry must
  // survive each restart while the trial group is unchanged.
  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    cache = std::make_unique<disk_cache::BackendImpl>(
        cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
    cache->Init(cb.callback());
    ASSERT_THAT(cb.WaitForResult(), IsOk());
    EXPECT_EQ(1, cache->GetEntryCount());

    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache->OpenEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    result.ReleaseEntry()->Close();
  }
}
2592 
2593 // Tests that the disk cache can leave the control group preserving existing
2594 // entries.
TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
  {
    // Instantiate the SimpleCacheTrial, forcing this run into the
    // ExperimentControl group.
    base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                           "ExperimentControl");

    std::unique_ptr<disk_cache::BackendImpl> cache =
        CreateExistingEntryCache(cache_path_);
    ASSERT_TRUE(cache.get());
  }

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentNo group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
  net::TestCompletionCallback cb;

  // After leaving the control group, restarts must keep the entry written
  // while the cache was still in the control group.
  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    std::unique_ptr<disk_cache::BackendImpl> cache(
        std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
                                                  net::DISK_CACHE, nullptr));
    cache->Init(cb.callback());
    ASSERT_THAT(cb.WaitForResult(), IsOk());
    EXPECT_EQ(1, cache->GetEntryCount());

    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache->OpenEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}
2629 
2630 // Tests that the cache is properly restarted on recovery error.
2631 // Disabled on android since this test requires cache creator to create
2632 // blockfile caches.
2633 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheBackendTest, DeleteOld) {
  // Start with an incompatible on-disk cache; kResetOnError should blow it
  // away and recreate a healthy one.
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  SetNewEviction();

  TestBackendResultCompletionCallback cb;
  {
    // Backend creation must not block the calling sequence, even while it
    // deletes and recreates the files.
    base::ScopedDisallowBlocking disallow_blocking;
    base::FilePath path(cache_path_);
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
        /*file_operations=*/nullptr, path, 0,
        disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
        cb.callback());
    path.clear();  // Make sure path was captured by the previous call.
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
  }
  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, /*max_size = */ 0,
                                  mask_));
}
2654 #endif
2655 
2656 // We want to be able to deal with messed up entries on disk.
// Opens a cache copied from the "bad_entry" golden files: one entry is
// readable, the damaged one must fail to open without crashing the backend.
void DiskCacheBackendTest::BackendInvalidEntry2() {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  entry1->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}
2670 
// Damaged-entry handling with the default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();
}
2674 
// Damaged-entry handling with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  SetNewEviction();
  BackendInvalidEntry2();
}
2679 
2680 // Tests that we don't crash or hang when enumerating this cache.
BackendInvalidEntry3()2681 void DiskCacheBackendTest::BackendInvalidEntry3() {
2682   SetMask(0x1);        // 2-entry table.
2683   SetMaxSize(0x3000);  // 12 kB.
2684   DisableFirstCleanup();
2685   InitCache();
2686 
2687   disk_cache::Entry* entry;
2688   std::unique_ptr<TestIterator> iter = CreateIterator();
2689   while (iter->OpenNextEntry(&entry) == net::OK) {
2690     entry->Close();
2691   }
2692 }
2693 
// Enumeration over the "dirty_entry3" golden cache, default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();
}
2698 
// Enumeration over the "dirty_entry4" golden cache with the new eviction
// algorithm; the cache stays damaged, so integrity checking is skipped.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  BackendInvalidEntry3();
  DisableIntegrityCheck();
}
2705 
2706 // Test that we handle a dirty entry on the LRU list, already replaced with
2707 // the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Evicting must cope with the dirty/duplicated entry without crashing.
  TrimForTest(false);
}
2717 
2718 // Test that we handle a dirty entry on the deleted list, already replaced with
2719 // the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Trimming the deleted list must cope with the dirty entry on it.
  TrimDeletedListForTest(false);
}
2730 
TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}
2745 
2746 // Tests that we don't hang when there is a loop on the hash collision list.
2747 // The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("The first key", &entry), IsOk());
  entry->Close();

  // Two trims: the self-referencing entry must be discarded without looping,
  // while the freshly created entry survives.
  TrimForTest(false);
  TrimForTest(false);
  ASSERT_THAT(OpenEntry("The first key", &entry), IsOk());
  entry->Close();
  EXPECT_EQ(1, cache_->GetEntryCount());
}
2768 
2769 // Tests that we don't hang when there is a loop on the hash collision list.
2770 // The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a wide loop of 5 entries.

  // Walking the collision list for a missing key must terminate (and fail)
  // rather than spin forever on the loop.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
}
2783 
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_THAT(OpenEntry("the first key", &entry), IsOk());

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  // The remaining entry is still reachable, proving the list survived.
  ASSERT_THAT(OpenEntry("some other key", &entry), IsOk());
  entry->Close();
}
2803 
2804 // Tests handling of corrupt entries by keeping the rankings node around, with
2805 // a fatal failure.
// Corrupts an entry's rankings node (next = 0), a fatal failure: opening the
// entry drops it, and a subsequent enumeration wipes the whole cache because
// the LRU list still contains the corrupt node.
void DiskCacheBackendTest::BackendInvalidEntry7() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->next = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should delete the cache. The list still has a corrupt node.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
2838 
// Fatal rankings corruption, default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry7) {
  BackendInvalidEntry7();
}
2842 
// Fatal rankings corruption, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
  SetNewEviction();
  BackendInvalidEntry7();
}
2847 
2848 // Tests handling of corrupt entries by keeping the rankings node around, with
2849 // a non fatal failure.
// Corrupts an entry's rankings node (contents = 0), a non-fatal failure: the
// bad entry is dropped, but the rest of the cache stays usable and a full
// enumeration still returns the surviving entry.
void DiskCacheBackendTest::BackendInvalidEntry8() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->contents = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should not delete the cache.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  entry->Close();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  EXPECT_EQ(1, cache_->GetEntryCount());
}
2883 
// Non-fatal rankings corruption, default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry8) {
  BackendInvalidEntry8();
}
2887 
// Non-fatal rankings corruption, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
  SetNewEviction();
  BackendInvalidEntry8();
}
2892 
// Tests handling of corrupt entries detected by enumerations. Note that these
// tests (xx9 to xx11) are basically just going through slightly different
// codepaths so they are tightly coupled with the code, but that is better than
// not testing error handling code.
// Shared body for the (Trim)InvalidEntry9 tests: creates two entries, marks
// the second one's stored state as bogus (0xbad), then checks that either
// eviction (|eviction| == true) or enumeration detects the corruption
// without crashing or deleting the wrong data.
void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry: write an invalid state value straight into the
  // on-disk record.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  if (eviction) {
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // Now a full iteration will work, and return one entry.
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // This should detect what's left of the bad entry.
    EXPECT_NE(net::OK, OpenEntry(second, &entry));
    EXPECT_EQ(2, cache_->GetEntryCount());
  }
  DisableIntegrityCheck();
}
2941 
// Corruption detected by enumeration, default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry9) {
  BackendInvalidEntry9(false);
}
2945 
// Corruption detected by enumeration, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(false);
}
2950 
// Corruption detected by eviction, default eviction.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
  BackendInvalidEntry9(true);
}
2954 
// Corruption detected by eviction, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(true);
}
2959 
2960 // Tests handling of corrupt entries detected by enumerations.
// Like BackendInvalidEntry9 but under the new eviction algorithm, with the
// corrupt entry on list 0 and a healthy entry promoted to list 1; exercises
// detection via eviction (|eviction| == true) or via enumeration.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  // Re-open and touch |first| so it moves to a higher list.
  ASSERT_THAT(OpenEntry(first, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    TrimForTest(false);
    EXPECT_EQ(3, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  }
  DisableIntegrityCheck();
}
3014 
// Corruption on list 0 detected by enumeration.
TEST_F(DiskCacheBackendTest, InvalidEntry10) {
  BackendInvalidEntry10(false);
}
3018 
// Corruption on list 0 detected by eviction.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
  BackendInvalidEntry10(true);
}
3022 
3023 // Tests handling of corrupt entries detected by enumerations.
// Like BackendInvalidEntry10, but both entries are promoted to list 1 and
// the corrupt one sits in front of a healthy one; exercises detection via
// eviction (|eviction| == true) or via enumeration.
void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  // Touch |first| so it moves to a higher list.
  ASSERT_THAT(OpenEntry(first, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  entry->Close();
  // Touch |second| as well, then corrupt it while it is on list 1.
  ASSERT_THAT(OpenEntry(second, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third.
  // List 1: second (bad) -> first.

  if (eviction) {
    // Detection order: third -> first -> second.
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second.
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // Now a full iteration will work, and return two entries.
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  }
  DisableIntegrityCheck();
}
3085 
// Corruption on list 1 detected by enumeration.
TEST_F(DiskCacheBackendTest, InvalidEntry11) {
  BackendInvalidEntry11(false);
}
3089 
// Corruption on list 1 detected by eviction.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
  BackendInvalidEntry11(true);
}
3093 
3094 // Tests handling of corrupt entries in the middle of a long eviction run.
// Puts a corrupt entry in the middle of the LRU list and then runs an
// "empty the cache" eviction (TrimForTest(true)); only the still-open entry
// may survive.
void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  // |fourth| stays open during the trim, so it cannot be evicted.
  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
  TrimForTest(true);
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry->Close();
  DisableIntegrityCheck();
}
3122 
// Corrupt entry mid-eviction, default eviction policy.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
  BackendTrimInvalidEntry12();
}
3126 
// Corrupt entry mid-eviction, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
  SetNewEviction();
  BackendTrimInvalidEntry12();
}
3131 
3132 // We want to be able to deal with messed up entries on disk.
// Opens the "bad_rankings" golden cache: the entry with the damaged rankings
// node must fail to open while the healthy one stays accessible.
void DiskCacheBackendTest::BackendInvalidRankings2() {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_THAT(OpenEntry("some other key", &entry2), IsOk());
  entry2->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}
3146 
// Damaged rankings node, default eviction.
TEST_F(DiskCacheBackendTest, InvalidRankings2) {
  BackendInvalidRankings2();
}
3150 
// Damaged rankings node, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
  SetNewEviction();
  BackendInvalidRankings2();
}
3155 
3156 // If the LRU is corrupt, we delete the cache.
// Shared body for the InvalidRankings* tests (cache already initialized by
// the caller): enumeration hits the corrupt LRU, which wipes the cache.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  entry->Close();
  EXPECT_EQ(2, cache_->GetEntryCount());

  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache_->GetEntryCount());
}
3168 
// Corrupt LRU: the backend restarts successfully, default eviction.
TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendInvalidRankings();
}
3175 
// Corrupt LRU: the backend restarts successfully, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendInvalidRankings();
}
3183 
// Corrupt LRU with reinitialization forced to fail, default eviction.
TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}
3191 
// Corrupt LRU with reinitialization forced to fail, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}
3200 
3201 // If the LRU is corrupt and we have open entries, we disable the cache.
// Shared body for the Disable* tests: the corrupt LRU is hit while an entry
// is still open, so the backend disables itself instead of restarting, and
// only restarts once the open entry is closed.
void DiskCacheBackendTest::BackendDisable() {
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());

  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(0, cache_->GetEntryCount());
  // While disabled, new entries cannot be created.
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));

  entry1->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}
3217 
// Disable-on-corruption with a successful restart, default eviction.
TEST_F(DiskCacheBackendTest, DisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable();
}
3224 
// Disable-on-corruption with a successful restart, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable();
}
3232 
// Disable-on-corruption with reinitialization forced to fail, default
// eviction.
TEST_F(DiskCacheBackendTest, DisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}
3240 
// Disable-on-corruption with reinitialization forced to fail, new eviction
// algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}
3249 
3250 // This is another type of corruption on the LRU; disable the cache.
BackendDisable2()3251 void DiskCacheBackendTest::BackendDisable2() {
3252   EXPECT_EQ(8, cache_->GetEntryCount());
3253 
3254   disk_cache::Entry* entry;
3255   std::unique_ptr<TestIterator> iter = CreateIterator();
3256   int count = 0;
3257   while (iter->OpenNextEntry(&entry) == net::OK) {
3258     ASSERT_TRUE(nullptr != entry);
3259     entry->Close();
3260     count++;
3261     ASSERT_LT(count, 9);
3262   };
3263 
3264   FlushQueueForTest();
3265   EXPECT_EQ(0, cache_->GetEntryCount());
3266 }
3267 
// Looping LRU with a successful restart, default eviction.
TEST_F(DiskCacheBackendTest, DisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable2();
}
3274 
// Looping LRU with a successful restart, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable2();
}
3282 
// Looping LRU with reinitialization forced to fail, default eviction.
TEST_F(DiskCacheBackendTest, DisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}
3290 
// Looping LRU with reinitialization forced to fail, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}
3299 
3300 // If the index size changes when we disable the cache, we should not crash.
// Shared body for the Disable*3 tests: disabling the cache changes the index
// size (the callers set a larger max size); creating an entry after the
// restart must work and must not crash.
void DiskCacheBackendTest::BackendDisable3() {
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());
  entry1->Close();

  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  FlushQueueForTest();

  // After the restart the cache is usable again.
  ASSERT_THAT(CreateEntry("Something new", &entry2), IsOk());
  entry2->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}
3316 
// Index-size change on disable, default eviction.
TEST_F(DiskCacheBackendTest, DisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDisable3();
}
3324 
// Index-size change on disable, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDisable3();
}
3333 
// If we disable the cache, already open entries should work as far as possible.
void DiskCacheBackendTest::BackendDisable4() {
  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());

  // Create two fresh entries: one with a medium-sized key and one with a very
  // large key (sizes chosen to exercise different key-storage paths).
  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  ASSERT_THAT(CreateEntry(key3, &entry3), IsOk());

  const int kBufSize = 20000;
  scoped_refptr<net::IOBuffer> buf =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
  EXPECT_EQ(0, cache_->GetEntryCount());

  // New entries cannot be created while the cache is disabled...
  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  // ...but entries that were already open keep supporting reads and writes.
  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  // Keys remain retrievable from the open entries.
  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}
3383 
// Runs BackendDisable4() over the "bad_rankings" fixture.
TEST_F(DiskCacheBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable4();
}

// Same scenario with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable4();
}
3398 
// Tests the exposed API with a disabled cache.
void DiskCacheBackendTest::BackendDisabledAPI() {
  cache_impl_->SetUnitTestMode();  // Simulate failure restarting the cache.

  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());
  entry1->Close();
  // Hitting the corrupt part of the fixture disables the cache.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  FlushQueueForTest();
  // The cache should be disabled.

  // Every public operation must fail gracefully rather than crash.
  EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, OpenEntry("First", &entry2));
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
  EXPECT_NE(net::OK, DoomEntry("First"));
  EXPECT_NE(net::OK, DoomAllEntries());
  EXPECT_NE(net::OK, DoomEntriesBetween(Time(), Time::Now()));
  EXPECT_NE(net::OK, DoomEntriesSince(Time()));
  iter = CreateIterator();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));

  // Stats come back empty, and external-hit notifications are a no-op.
  base::StringPairs stats;
  cache_->GetStats(&stats);
  EXPECT_TRUE(stats.empty());
  OnExternalCacheHit("First");
}
3428 
// Runs BackendDisabledAPI() over the "bad_rankings2" fixture.
TEST_F(DiskCacheBackendTest, DisabledAPI) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  InitCache();
  BackendDisabledAPI();
}

// Same scenario with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDisabledAPI) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisabledAPI();
}
3443 
// Test that some eviction of some kind happens.
void DiskCacheBackendTest::BackendEviction() {
  const int kMaxSize = 200 * 1024;
  const int kMaxEntryCount = 20;
  const int kWriteSize = kMaxSize / kMaxEntryCount;

  // Write twice as much data as fits, guaranteeing eviction pressure.
  const int kWriteEntryCount = kMaxEntryCount * 2;

  static_assert(kWriteEntryCount * kWriteSize > kMaxSize,
                "must write more than MaxSize");

  SetMaxSize(kMaxSize);
  InitSparseCache(nullptr, nullptr);

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  std::string key_prefix("prefix");
  for (int i = 0; i < kWriteEntryCount; ++i) {
    AddDelay();  // Give successive entries distinguishable timestamps.
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    disk_cache::ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  // Eviction must have kept the total size below the configured maximum.
  int size = CalculateSizeOfAllEntries();
  EXPECT_GT(kMaxSize, size);
}
3476 
// Runs BackendEviction() against the default (blockfile) backend.
TEST_F(DiskCacheBackendTest, BackendEviction) {
  BackendEviction();
}

// Same scenario with the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyBackendEviction) {
  SetMemoryOnlyMode();
  BackendEviction();
}
3485 
3486 // TODO(morlovich): Enable BackendEviction test for simple cache after
3487 // performance problems are addressed. See crbug.com/588184 for more
3488 // information.
3489 
// This overly specific looking test is a regression test aimed at
// crbug.com/589186.
TEST_F(DiskCacheBackendTest, MemoryOnlyUseAfterFree) {
  SetMemoryOnlyMode();

  const int kMaxSize = 200 * 1024;
  const int kMaxEntryCount = 20;
  const int kWriteSize = kMaxSize / kMaxEntryCount;

  SetMaxSize(kMaxSize);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  // Create an entry to be our sparse entry that gets written later.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first parent", &entry), IsOk());
  disk_cache::ScopedEntryPtr first_parent(entry);

  // Create a ton of entries, and keep them open, to put the cache well above
  // its eviction threshold.
  const int kTooManyEntriesCount = kMaxEntryCount * 2;
  std::list<disk_cache::ScopedEntryPtr> open_entries;
  std::string key_prefix("prefix");
  for (int i = 0; i < kTooManyEntriesCount; ++i) {
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    // Not checking the result because it will start to fail once the max size
    // is reached.
    WriteData(entry, 1, 0, buffer.get(), kWriteSize, false);
    open_entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }

  // Writing this sparse data should not crash. Ignoring the result because
  // we're only concerned with not crashing in this particular test.
  first_parent->WriteSparseData(32768, buffer.get(), 1024,
                                net::CompletionOnceCallback());
}
3530 
TEST_F(DiskCacheBackendTest, MemoryCapsWritesToMaxSize) {
  // Verify that the memory backend won't grow beyond its max size if lots of
  // open entries (each smaller than the max entry size) are trying to write
  // beyond the max size.
  SetMemoryOnlyMode();

  const int kMaxSize = 100 * 1024;       // 100KB cache
  const int kNumEntries = 20;            // 20 entries to write
  const int kWriteSize = kMaxSize / 10;  // Each entry writes 1/10th the max

  SetMaxSize(kMaxSize);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  // Create an entry to be the final entry that gets written later.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("final", &entry), IsOk());
  disk_cache::ScopedEntryPtr final_entry(entry);

  // Create a ton of entries, write to the cache, and keep the entries open.
  // They should start failing writes once the cache fills.
  std::list<disk_cache::ScopedEntryPtr> open_entries;
  std::string key_prefix("prefix");
  for (int i = 0; i < kNumEntries; ++i) {
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    WriteData(entry, 1, 0, buffer.get(), kWriteSize, false);
    open_entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }
  // Total usage never exceeds the configured maximum.
  EXPECT_GE(kMaxSize, CalculateSizeOfAllEntries());

  // Any more writing at this point should cause an error.
  EXPECT_THAT(
      WriteData(final_entry.get(), 1, 0, buffer.get(), kWriteSize, false),
      IsError(net::ERR_INSUFFICIENT_RESOURCES));
}
3570 
// Initializes a backend directly (via SyncInit) and pumps the message loop so
// the backend's usage-stats timer gets a chance to fire without crashing.
TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  // Want to use our thread since we call SyncInit ourselves.
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(
          cache_path_, nullptr,
          base::SingleThreadTaskRunner::GetCurrentDefault(), net::DISK_CACHE,
          nullptr));
  ASSERT_TRUE(nullptr != cache.get());
  cache->SetUnitTestMode();
  ASSERT_THAT(cache->SyncInit(), IsOk());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}
3589 
// A backend that fails to initialize (wrong on-disk version) must not create
// its usage-stats timer.
TEST_F(DiskCacheBackendTest, TimerNotCreated) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));

  // Want to use our thread since we call SyncInit ourselves.
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(
          cache_path_, nullptr,
          base::SingleThreadTaskRunner::GetCurrentDefault(), net::DISK_CACHE,
          nullptr));
  ASSERT_TRUE(nullptr != cache.get());
  cache->SetUnitTestMode();
  ASSERT_NE(net::OK, cache->SyncInit());

  ASSERT_TRUE(nullptr == cache->GetTimerForTest());

  DisableIntegrityCheck();
}
3607 
// Usage statistics are recorded and survive a cache shutdown/reopen cycle.
TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  // The create above must be recorded as exactly one "Create hit".
  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, base::ranges::count(stats, hits));

  ResetCaches();

  // Now open the cache and verify that the stats are still there.
  DisableFirstCleanup();
  InitCache();
  EXPECT_EQ(1, cache_->GetEntryCount());

  stats.clear();
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  // The persisted counter is unchanged after the restart.
  EXPECT_EQ(1, base::ranges::count(stats, hits));
}
3635 
// DoomAllEntries() must remove every entry, whether closed, open, or already
// doomed, and must be safely repeatable.
void DiskCacheBackendTest::BackendDoomAll() {
  InitCache();

  // Two closed entries plus two that remain open.
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  entry1->Close();
  entry2->Close();

  ASSERT_THAT(CreateEntry("third", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry2), IsOk());

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // We should stop posting tasks at some point (if we post any).
  base::RunLoop().RunUntilIdle();

  // The doomed names can be re-created (but not re-opened).
  disk_cache::Entry *entry3, *entry4;
  EXPECT_NE(net::OK, OpenEntry("third", &entry3));
  ASSERT_THAT(CreateEntry("third", &entry3), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry4), IsOk());

  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  entry1->Close();
  entry2->Close();
  entry3->Doom();  // The entry should be already doomed, but this must work.
  entry3->Close();
  entry4->Close();

  // Now try with all references released.
  ASSERT_THAT(CreateEntry("third", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry2), IsOk());
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(2, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // Dooming an already empty cache must also succeed.
  EXPECT_THAT(DoomAllEntries(), IsOk());
}
3681 
// Runs BackendDoomAll() against the default (blockfile) backend.
TEST_F(DiskCacheBackendTest, DoomAll) {
  BackendDoomAll();
}

// Same scenario with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
  SetNewEviction();
  BackendDoomAll();
}

// Same scenario with the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
  SetMemoryOnlyMode();
  BackendDoomAll();
}

// Same scenario with the APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  BackendDoomAll();
}

// Same scenario with the SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomAll();
}
3705 
// If the index size changes when we doom the cache, we should not crash.
void DiskCacheBackendTest::BackendDoomAll2() {
  EXPECT_EQ(2, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());

  // The cache remains usable after dooming everything.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("Something new", &entry), IsOk());
  entry->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}
3717 
// Runs BackendDoomAll2() over the "bad_rankings2" fixture.
TEST_F(DiskCacheBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDoomAll2();
}

// Same scenario with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDoomAll2();
}
3734 
// We should be able to create the same entry on multiple simultaneous instances
// of the cache.
TEST_F(DiskCacheTest, MultipleInstances) {
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  TestBackendResultCompletionCallback cb;

  const int kNumberOfCaches = 2;
  std::unique_ptr<disk_cache::Backend> caches[kNumberOfCaches];

  // First instance: a regular disk cache in its own temp directory.
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      store1.GetPath(), 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  caches[0] = std::move(rv.backend);
  // Second instance: a different cache type, in a separate temp directory.
  rv = disk_cache::CreateCacheBackend(
      net::GENERATED_BYTE_CODE_CACHE, net::CACHE_BACKEND_DEFAULT,
      /*file_operations=*/nullptr, store2.GetPath(), 0,
      disk_cache::ResetHandling::kNeverReset, /*net_log=*/nullptr,
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  caches[1] = std::move(rv.backend);

  ASSERT_TRUE(caches[0].get() != nullptr && caches[1].get() != nullptr);

  // The same key must be creatable independently in each instance.
  std::string key("the first key");
  for (auto& cache : caches) {
    TestEntryResultCompletionCallback cb2;
    EntryResult result = cache->CreateEntry(key, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}
3774 
3775 // Test the six regions of the curve that determines the max cache size.
TEST_F(DiskCacheTest,AutomaticMaxSize)3776 TEST_F(DiskCacheTest, AutomaticMaxSize) {
3777   using disk_cache::kDefaultCacheSize;
3778   int64_t large_size = kDefaultCacheSize;
3779 
3780   // Region 1: expected = available * 0.8
3781   EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
3782             disk_cache::PreferredCacheSize(large_size - 1));
3783   EXPECT_EQ(kDefaultCacheSize * 8 / 10,
3784             disk_cache::PreferredCacheSize(large_size));
3785   EXPECT_EQ(kDefaultCacheSize - 1,
3786             disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
3787 
3788   // Region 2: expected = default_size
3789   EXPECT_EQ(kDefaultCacheSize,
3790             disk_cache::PreferredCacheSize(large_size * 10 / 8));
3791   EXPECT_EQ(kDefaultCacheSize,
3792             disk_cache::PreferredCacheSize(large_size * 10 - 1));
3793 
3794   // Region 3: expected = available * 0.1
3795   EXPECT_EQ(kDefaultCacheSize, disk_cache::PreferredCacheSize(large_size * 10));
3796   EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
3797             disk_cache::PreferredCacheSize(large_size * 25 - 1));
3798 
3799   // Region 4: expected = default_size * 2.5
3800   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3801             disk_cache::PreferredCacheSize(large_size * 25));
3802   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3803             disk_cache::PreferredCacheSize(large_size * 100 - 1));
3804   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3805             disk_cache::PreferredCacheSize(large_size * 100));
3806   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3807             disk_cache::PreferredCacheSize(large_size * 250 - 1));
3808 
3809   // Region 5: expected = available * 0.1
3810   int64_t largest_size = kDefaultCacheSize * 4;
3811   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3812             disk_cache::PreferredCacheSize(large_size * 250));
3813   EXPECT_EQ(largest_size - 1,
3814             disk_cache::PreferredCacheSize(largest_size * 100 - 1));
3815 
3816   // Region 6: expected = largest possible size
3817   EXPECT_EQ(largest_size, disk_cache::PreferredCacheSize(largest_size * 100));
3818   EXPECT_EQ(largest_size, disk_cache::PreferredCacheSize(largest_size * 10000));
3819 }
3820 
// Exercises the CACHE_UMA histogram macro against a live backend; the macro
// records per-experiment-group histograms, so emitting with different values
// simulates an instance moving between experiment groups.
TEST_F(DiskCacheBackendTest, Histograms) {
  InitCache();
  disk_cache::BackendImpl* backend_ = cache_impl_;  // Needed by the macro.

  for (int i = 1; i < 3; i++) {
    CACHE_UMA(HOURS, "FillupTime", i, 28);
  }
}
3831 
// Make sure that we keep the total memory used by the internal buffers under
// control.
TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, true);

  // Repeatedly grow and shrink the entry's in-memory buffers; the backend's
  // buffer accounting must return to zero at the end.
  for (int i = 0; i < 10; i++) {
    SCOPED_TRACE(i);
    // Allocate 2MB for this entry.
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  }

  entry->Close();
  EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
}
3867 
// This test assumes at least 150MB of system memory.
TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
  InitCache();

  // Each successful IsAllocAllowed() call is accounted against the backend's
  // total buffer budget.
  const int kOneMB = 1024 * 1024;
  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());

  // Releasing a buffer returns its size to the budget.
  cache_impl_->BufferDeleted(kOneMB);
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  // Check the upper limit.
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));

  // Exhaust the budget one MB at a time; eventually allocation is denied.
  for (int i = 0; i < 30; i++)
    cache_impl_->IsAllocAllowed(0, kOneMB);  // Ignore the result.

  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
}
3893 
// Tests that sharing of external files works and we are able to delete the
// files when we need to.
TEST_F(DiskCacheBackendTest, FileSharing) {
  InitCache();

  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
  base::FilePath name = cache_impl_->GetFileName(address);

  {
    auto file = base::MakeRefCounted<disk_cache::File>(false);
    file->Init(name);

#if BUILDFLAG(IS_WIN)
    // On Windows the cache opens its files without FILE_SHARE_DELETE, so a
    // second open must request delete sharing to succeed.
    DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
    DWORD access = GENERIC_READ | GENERIC_WRITE;
    base::win::ScopedHandle file2(CreateFile(name.value().c_str(), access,
                                             sharing, nullptr, OPEN_EXISTING, 0,
                                             nullptr));
    EXPECT_FALSE(file2.IsValid());

    sharing |= FILE_SHARE_DELETE;
    file2.Set(CreateFile(name.value().c_str(), access, sharing, nullptr,
                         OPEN_EXISTING, 0, nullptr));
    EXPECT_TRUE(file2.IsValid());
#endif

    // Deleting the path while the handle is open must succeed.
    EXPECT_TRUE(base::DeleteFile(name));

    // We should be able to use the file.
    const int kSize = 200;
    char buffer1[kSize];
    char buffer2[kSize];
    memset(buffer1, 't', kSize);
    memset(buffer2, 0, kSize);
    EXPECT_TRUE(file->Write(buffer1, kSize, 0));
    EXPECT_TRUE(file->Read(buffer2, kSize, 0));
    EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
  }

  // Once the last handle is closed the file is really gone.
  base::File file(name, base::File::FLAG_OPEN | base::File::FLAG_READ);
  EXPECT_FALSE(file.IsValid());
  EXPECT_EQ(file.error_details(), base::File::FILE_ERROR_NOT_FOUND);
}
3938 
// An external cache hit must refresh an entry's eviction rank so that it
// survives a subsequent trim.
TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }

  // Ping the oldest entry.
  OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry("key0", &entry), IsOk());
  entry->Close();
}
3960 
// Same external-hit/trim scenario with the SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }

  // Ping the oldest entry.
  OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry("key0", &entry), IsOk());
  entry->Close();
}
3983 
// Re-runs the shutdown-with-pending-operation tests against the Simple Cache.
TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
  // Use net::APP_CACHE to make size estimations deterministic via
  // non-optimistic writes.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingCreate(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingDoom) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingDoom();
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingFileIO(false);
}
4003 
// The tests below re-run the generic backend test bodies against the Simple
// Cache backend (and, where noted, with the APP_CACHE cache type).

TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
  SetSimpleCacheMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheLoad) {
  SetMaxSize(0x100000);
  SetSimpleCacheMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
  SetSimpleCacheMode();
  BackendDoomRecent();
}

// crbug.com/330926, crbug.com/370677
TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
  SetSimpleCacheMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendDoomAll();
}
4060 
// If one of a Simple Cache entry's on-disk files is missing, opening the entry
// must fail and the remaining files must be cleaned up.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);
  entry->Close();
  entry = nullptr;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);
  entry->Close();
  entry = nullptr;

  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(base::DeleteFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryNormalFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}
4096 
// A Simple Cache entry file with a corrupt header must fail to open.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  disk_cache::Entry* null = nullptr;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ASSERT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // The entry is being closed on the Simple Cache worker pool
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Write an invalid header for stream 0 and stream 1.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  // Overwrite the file with a header carrying a bogus magic number.
  disk_cache::SimpleFileHeader header;
  header.initial_magic_number = UINT64_C(0xbadf00d);
  EXPECT_TRUE(base::WriteFile(entry_file1_path,
                              base::as_bytes(base::make_span(&header, 1u))));
  ASSERT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
}
4131 
// Tests that the Simple Cache Backend fails to initialize with non-matching
// file structure on disk.
TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
  // Create a cache structure with the |BackendImpl|.
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  ResetCaches();

  // Check that the |SimpleBackendImpl| does not favor this structure.
  auto simple_cache = std::make_unique<disk_cache::SimpleBackendImpl>(
      /*file_operations_factory=*/nullptr, cache_path_, nullptr, nullptr, 0,
      net::DISK_CACHE, nullptr);
  net::TestCompletionCallback cb;
  simple_cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.WaitForResult());
  simple_cache.reset();
  DisableIntegrityCheck();
}
4157 
// Tests that the |BackendImpl| refuses to initialize on top of the files
// generated by the Simple Cache Backend.
TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
  // Create a cache structure with the |SimpleBackendImpl|.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  // A zero-length write still forces the entry's files onto disk.
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  ResetCaches();

  // Check that the |BackendImpl| does not favor this structure.
  auto cache = std::make_unique<disk_cache::BackendImpl>(
      cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
  cache->SetUnitTestMode();
  net::TestCompletionCallback cb;
  cache->Init(cb.callback());
  // Initialization must fail over a Simple-Cache-format directory.
  EXPECT_NE(net::OK, cb.WaitForResult());
  cache.reset();
  // The directory is intentionally left in a mixed state; skip the fixture's
  // teardown integrity check.
  DisableIntegrityCheck();
}
4184 
// Runs the fixture's shared enumerator-invalidation test against the Simple
// Cache backend.
TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
  SetSimpleCacheMode();
  BackendFixEnumerators();
}
4189 
// Tests basic functionality of the SimpleBackend implementation of the
// enumeration API.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries.
  std::set<std::string> keys_to_match(key_pool);
  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  // A limit of -1 means "enumerate until the iterator is exhausted".
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());

  // Check that opening entries does not affect enumeration.
  keys_to_match = key_pool;
  iter = CreateIterator();
  count = 0;
  disk_cache::Entry* entry_opened_before;
  ASSERT_THAT(OpenEntry(*(key_pool.begin()), &entry_opened_before), IsOk());
  // Enumerate only half of the entries...
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size() / 2, iter.get(),
                                    &keys_to_match, &count));

  // ...open another entry mid-enumeration, then finish the rest.
  disk_cache::Entry* entry_opened_middle;
  ASSERT_EQ(net::OK, OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();
  entry_opened_before->Close();
  entry_opened_middle->Close();

  // Every key must have been seen exactly once despite the open entries.
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}
4226 
// Tests that the enumerations are not affected by dooming an entry in the
// middle.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries but the doomed one.
  std::set<std::string> keys_to_match(key_pool);
  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  // Consume half of the entries before dooming one of the remainder.
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size() / 2, iter.get(),
                                    &keys_to_match, &count));

  std::string key_to_delete = *(keys_to_match.begin());
  DoomEntry(key_to_delete);
  // Remove the doomed key from both bookkeeping sets so the final counts
  // reflect the surviving entries only.
  keys_to_match.erase(key_to_delete);
  key_pool.erase(key_to_delete);
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}
4252 
// Tests that enumerations are not affected by corrupt files.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
  SetSimpleCacheMode();
  InitCache();
  // Create a corrupt entry.
  const std::string key = "the key";
  disk_cache::Entry* corrupted_entry;

  ASSERT_THAT(CreateEntry(key, &corrupted_entry), IsOk());
  ASSERT_TRUE(corrupted_entry);
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_EQ(kSize,
            WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
  ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
  corrupted_entry->Close();
  // Let all I/O finish so it doesn't race with corrupting the file below.
  RunUntilIdle();

  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Overwrite the entry's file with garbage on disk.
  EXPECT_TRUE(
      disk_cache::simple_util::CreateCorruptFileForTests(key, cache_path_));
  // The index still counts the corrupt entry until it is actually opened.
  EXPECT_EQ(key_pool.size() + 1, static_cast<size_t>(cache_->GetEntryCount()));

  // Check that enumeration returns all entries but the corrupt one.
  std::set<std::string> keys_to_match(key_pool);
  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}
4291 
// Tests that enumerations don't leak memory when the backend is destructed
// mid-enumeration.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  std::unique_ptr<TestIterator> iter = CreateIterator();
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  EXPECT_TRUE(entry);
  // Keep the entry open across backend destruction; the scoper closes it.
  disk_cache::ScopedEntryPtr entry_closer(entry);

  // Destroy the backend while |iter| is still alive.
  ResetCaches();
  // This test passes if we don't leak memory.
}
4309 
4310 // Verify that tasks run in priority order when the experiment is enabled.
4311 // Test has races, disabling until fixed: https://crbug.com/853283
TEST_F(DiskCacheBackendTest,DISABLED_SimpleCachePrioritizedEntryOrder)4312 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCachePrioritizedEntryOrder) {
4313   base::test::ScopedFeatureList scoped_feature_list;
4314   SetSimpleCacheMode();
4315   InitCache();
4316 
4317   // Set the SimpleCache's worker pool to a sequenced type for testing
4318   // priority order.
4319   disk_cache::SimpleBackendImpl* simple_cache =
4320       static_cast<disk_cache::SimpleBackendImpl*>(cache_.get());
4321   auto task_runner = base::ThreadPool::CreateSequencedTaskRunner(
4322       {base::TaskPriority::USER_VISIBLE, base::MayBlock()});
4323   simple_cache->SetTaskRunnerForTesting(task_runner);
4324 
4325   // Create three entries. Priority order is 3, 1, 2 because 3 has the highest
4326   // request priority and 1 is created before 2.
4327   disk_cache::Entry* entry1 = nullptr;
4328   disk_cache::Entry* entry2 = nullptr;
4329   disk_cache::Entry* entry3 = nullptr;
4330   ASSERT_THAT(CreateEntryWithPriority("first", net::LOWEST, &entry1), IsOk());
4331   ASSERT_THAT(CreateEntryWithPriority("second", net::LOWEST, &entry2), IsOk());
4332   ASSERT_THAT(CreateEntryWithPriority("third", net::HIGHEST, &entry3), IsOk());
4333 
4334   // Write some data to the entries.
4335   const int kSize = 10;
4336   scoped_refptr<net::IOBuffer> buf1 =
4337       base::MakeRefCounted<net::IOBuffer>(kSize);
4338   scoped_refptr<net::IOBuffer> buf2 =
4339       base::MakeRefCounted<net::IOBuffer>(kSize);
4340   scoped_refptr<net::IOBuffer> buf3 =
4341       base::MakeRefCounted<net::IOBuffer>(kSize);
4342   CacheTestFillBuffer(buf1->data(), kSize, false);
4343   CacheTestFillBuffer(buf2->data(), kSize, false);
4344   CacheTestFillBuffer(buf3->data(), kSize, false);
4345 
4346   // Write to stream 2 because it's the only stream that can't be read from
4347   // synchronously.
4348   EXPECT_EQ(kSize, WriteData(entry1, 2, 0, buf1.get(), kSize, true));
4349   EXPECT_EQ(kSize, WriteData(entry2, 2, 0, buf1.get(), kSize, true));
4350   EXPECT_EQ(kSize, WriteData(entry3, 2, 0, buf1.get(), kSize, true));
4351 
4352   // Wait until the task_runner's queue is empty (WriteData might have
4353   // optimistically returned synchronously but still had some tasks to run in
4354   // the worker pool.
4355   base::RunLoop run_loop;
4356   task_runner->PostTaskAndReply(FROM_HERE, base::DoNothing(),
4357                                 run_loop.QuitClosure());
4358   run_loop.Run();
4359 
4360   std::vector<int> finished_read_order;
4361   auto finished_callback = [](std::vector<int>* finished_read_order,
4362                               int entry_number, base::OnceClosure quit_closure,
4363                               int rv) {
4364     finished_read_order->push_back(entry_number);
4365     if (quit_closure)
4366       std::move(quit_closure).Run();
4367   };
4368 
4369   scoped_refptr<net::IOBuffer> read_buf1 =
4370       base::MakeRefCounted<net::IOBuffer>(kSize);
4371   scoped_refptr<net::IOBuffer> read_buf2 =
4372       base::MakeRefCounted<net::IOBuffer>(kSize);
4373   scoped_refptr<net::IOBuffer> read_buf3 =
4374       base::MakeRefCounted<net::IOBuffer>(kSize);
4375 
4376   // Read from the entries in order 2, 3, 1. They should be reprioritized to
4377   // 3, 1, 2.
4378   base::RunLoop read_run_loop;
4379 
4380   entry2->ReadData(2, 0, read_buf2.get(), kSize,
4381                    base::BindOnce(finished_callback, &finished_read_order, 2,
4382                                   read_run_loop.QuitClosure()));
4383   entry3->ReadData(2, 0, read_buf3.get(), kSize,
4384                    base::BindOnce(finished_callback, &finished_read_order, 3,
4385                                   base::OnceClosure()));
4386   entry1->ReadData(2, 0, read_buf1.get(), kSize,
4387                    base::BindOnce(finished_callback, &finished_read_order, 1,
4388                                   base::OnceClosure()));
4389   EXPECT_EQ(0u, finished_read_order.size());
4390 
4391   read_run_loop.Run();
4392   EXPECT_EQ((std::vector<int>{3, 1, 2}), finished_read_order);
4393   entry1->Close();
4394   entry2->Close();
4395   entry3->Close();
4396 }
4397 
// Tests that enumerations include entries with long keys.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationLongKeys) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Make the key longer than the initial header read, so the key cannot be
  // recovered from the first chunk alone.
  const size_t long_key_length =
      disk_cache::SimpleSynchronousEntry::kInitialHeaderRead + 10;
  std::string long_key(long_key_length, 'X');
  key_pool.insert(long_key);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(long_key.c_str(), &entry), IsOk());
  entry->Close();

  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  EXPECT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &key_pool, &count));
  // Every key — including the long one — must have been enumerated.
  EXPECT_TRUE(key_pool.empty());
}
4418 
4419 // Tests that a SimpleCache doesn't crash when files are deleted very quickly
4420 // after closing.
4421 // NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
TEST_F(DiskCacheBackendTest,SimpleCacheDeleteQuickly)4422 TEST_F(DiskCacheBackendTest, SimpleCacheDeleteQuickly) {
4423   SetSimpleCacheMode();
4424   for (int i = 0; i < 100; ++i) {
4425     InitCache();
4426     ResetCaches();
4427     EXPECT_TRUE(CleanupCacheDir());
4428   }
4429 }
4430 
// Verifies that dooming an entry after the index has been serialized does not
// invalidate the index: on the next startup it should load from disk rather
// than be rebuilt.
TEST_F(DiskCacheBackendTest, SimpleCacheLateDoom) {
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  entry1->Close();

  // Ensure that the directory mtime is flushed to disk before serializing the
  // index.
  disk_cache::FlushCacheThreadForTesting();
#if BUILDFLAG(IS_POSIX)
  base::File cache_dir(cache_path_,
                       base::File::FLAG_OPEN | base::File::FLAG_READ);
  EXPECT_TRUE(cache_dir.Flush());
#endif  // BUILDFLAG(IS_POSIX)
  ResetCaches();
  disk_cache::FlushCacheThreadForTesting();

  // The index is now written. Dooming the last entry can't delete a file,
  // because that would advance the cache directory mtime and invalidate the
  // index.
  entry2->Doom();
  entry2->Close();

  DisableFirstCleanup();
  InitCache();
  // The index must have been loaded from disk, not rebuilt from the files.
  EXPECT_EQ(disk_cache::SimpleIndex::INITIALIZE_METHOD_LOADED,
            simple_cache_impl_->index()->init_method());
}
4462 
// Verifies that a negative max size makes the Simple Cache pick a sane
// automatic size, and that the size experiment scales it as expected.
TEST_F(DiskCacheBackendTest, SimpleCacheNegMaxSize) {
  SetMaxSize(-1);
  SetSimpleCacheMode();
  InitCache();
  // We don't know what it will pick, but it's limited to what
  // disk_cache::PreferredCacheSize would return, scaled by the size experiment,
  // which only goes as much as 4x. It definitely should not be MAX_UINT64.
  EXPECT_NE(simple_cache_impl_->index()->max_size(),
            std::numeric_limits<uint64_t>::max());

  int max_default_size =
      2 * disk_cache::PreferredCacheSize(std::numeric_limits<int32_t>::max());

  ASSERT_GE(max_default_size, 0);
  EXPECT_LT(simple_cache_impl_->index()->max_size(),
            static_cast<unsigned>(max_default_size));

  uint64_t max_size_without_scaling = simple_cache_impl_->index()->max_size();

  // Scale to 200%. The size should be twice of |max_size_without_scaling| but
  // since that's capped on 20% of available size, checking for the size to be
  // between max_size_without_scaling and max_size_without_scaling*2.
  {
    base::test::ScopedFeatureList scoped_feature_list;
    std::map<std::string, std::string> field_trial_params;
    field_trial_params["percent_relative_size"] = "200";
    scoped_feature_list.InitAndEnableFeatureWithParameters(
        disk_cache::kChangeDiskCacheSizeExperiment, field_trial_params);

    // Re-initialize so the backend picks up the experiment parameters.
    InitCache();

    uint64_t max_size_scaled = simple_cache_impl_->index()->max_size();

    EXPECT_GE(max_size_scaled, max_size_without_scaling);
    EXPECT_LE(max_size_scaled, 2 * max_size_without_scaling);
  }
}
4500 
TEST_F(DiskCacheBackendTest, SimpleLastModified) {
  // Simple cache used to incorrectly set LastModified on entries based on
  // timestamp of the cache directory, and not the entries' file
  // (https://crbug.com/714143). So this test arranges for a situation
  // where this would occur by doing:
  // 1) Write entry 1
  // 2) Delay
  // 3) Write entry 2. This sets directory time stamp to be different from
  //    timestamp of entry 1 (due to the delay)
  // It then checks whether the entry 1 got the proper timestamp or not.

  SetSimpleCacheMode();
  InitCache();
  std::string key1 = GenerateKey(true);
  std::string key2 = GenerateKey(true);

  disk_cache::Entry* entry1;
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());

  // Make the Create complete --- SimpleCache can handle it optimistically,
  // and if we let it go fully async then trying to flush the Close might just
  // flush the Create.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  entry1->Close();

  // Make the ::Close actually complete, since it is asynchronous.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Upper bound for entry1's last-modified time.
  Time entry1_timestamp = Time::NowFromSystemTime();

  // Don't want AddDelay since it sleeps 1s(!) for SimpleCache, and we don't
  // care about reduced precision in index here. Busy-wait ~10ms instead.
  while (base::Time::NowFromSystemTime() <=
         (entry1_timestamp + base::Milliseconds(10))) {
    base::PlatformThread::Sleep(base::Milliseconds(1));
  }

  // Creating entry2 bumps the cache directory's mtime past entry1's time.
  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  entry2->Close();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  disk_cache::Entry* reopen_entry1;
  ASSERT_THAT(OpenEntry(key1, &reopen_entry1), IsOk());

  // This shouldn't pick up entry2's write time incorrectly.
  EXPECT_LE(reopen_entry1->GetLastModified(), entry1_timestamp);
  reopen_entry1->Close();
}
4554 
// Exercises the Simple Cache file-descriptor limiter: with more open entries
// than the FD limit (64 in this fixture), files must be transparently closed
// and reopened, which is observed via the FileDescriptorLimiterAction
// histogram counts.
TEST_F(DiskCacheBackendTest, SimpleFdLimit) {
  base::HistogramTester histogram_tester;
  SetSimpleCacheMode();
  // Make things blocking so CreateEntry actually waits for file to be
  // created.
  SetCacheType(net::APP_CACHE);
  InitCache();

  disk_cache::Entry* entries[kLargeNumEntries];
  std::string keys[kLargeNumEntries];
  for (int i = 0; i < kLargeNumEntries; ++i) {
    keys[i] = GenerateKey(true);
    ASSERT_THAT(CreateEntry(keys[i], &entries[i]), IsOk());
  }

  // Note the fixture sets the file limit to 64, so creating kLargeNumEntries
  // entries forces (kLargeNumEntries - 64) file closures.
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_CLOSE_FILE,
                                     kLargeNumEntries - 64);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE, 0);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  const int kSize = 25000;
  scoped_refptr<net::IOBuffer> buf1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  scoped_refptr<net::IOBuffer> buf2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf2->data(), kSize, false);

  // Doom an entry and create a new one with same name, to test that both
  // re-open properly.
  EXPECT_EQ(net::OK, DoomEntry(keys[0]));
  disk_cache::Entry* alt_entry;
  ASSERT_THAT(CreateEntry(keys[0], &alt_entry), IsOk());

  // One more file closure here to accommodate for alt_entry.
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_CLOSE_FILE,
                                     kLargeNumEntries - 64 + 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE, 0);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  // Do some writes in [1...kLargeNumEntries) range, both testing bring those in
  // and kicking out [0] and [alt_entry]. These have to be to stream != 0 to
  // actually need files.
  for (int i = 1; i < kLargeNumEntries; ++i) {
    EXPECT_EQ(kSize, WriteData(entries[i], 1, 0, buf1.get(), kSize, true));
    scoped_refptr<net::IOBuffer> read_buf =
        base::MakeRefCounted<net::IOBuffer>(kSize);
    ASSERT_EQ(kSize, ReadData(entries[i], 1, 0, read_buf.get(), kSize));
    EXPECT_EQ(0, memcmp(read_buf->data(), buf1->data(), kSize));
  }

  // Each of the (kLargeNumEntries - 1) touched entries was reopened, evicting
  // another file each time.
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE,
                                     kLargeNumEntries - 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);
  EXPECT_EQ(kSize, WriteData(entries[0], 1, 0, buf1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(alt_entry, 1, 0, buf2.get(), kSize, true));

  scoped_refptr<net::IOBuffer> read_buf =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  ASSERT_EQ(kSize, ReadData(entries[0], 1, 0, read_buf.get(), kSize));
  EXPECT_EQ(0, memcmp(read_buf->data(), buf1->data(), kSize));

  scoped_refptr<net::IOBuffer> read_buf2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  ASSERT_EQ(kSize, ReadData(alt_entry, 1, 0, read_buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(read_buf2->data(), buf2->data(), kSize));

  // Two more things than last time --- entries[0] and |alt_entry|
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE,
                                     kLargeNumEntries + 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  for (auto* entry : entries) {
    entry->Close();
    RunUntilIdle();
  }
  alt_entry->Close();
  RunUntilIdle();

  // Closes have to pull things in to write out the footer, but they also
  // free up FDs.
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_REOPEN_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);
}
4667 
// Regression test: closing sparse entries in a particular order while over
// the size limit must not trigger buggy reentrant eviction.
TEST_F(DiskCacheBackendTest, SparseEvict) {
  const int kMaxSize = 512;

  SetMaxSize(kMaxSize);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer = base::MakeRefCounted<net::IOBuffer>(64);
  CacheTestFillBuffer(buffer->data(), 64, false);

  disk_cache::Entry* entry0 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.0.com/", &entry0), IsOk());

  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.1.com/", &entry1), IsOk());

  disk_cache::Entry* entry2 = nullptr;
  // This strange looking domain name affects cache trim order
  // due to hashing
  ASSERT_THAT(CreateEntry("http://www.15360.com/", &entry2), IsOk());

  // Write sparse data to put us over the eviction threshold
  ASSERT_EQ(64, WriteSparseData(entry0, 0, buffer.get(), 64));
  // The far offset forces an additional sparse child entry for entry0.
  ASSERT_EQ(1, WriteSparseData(entry0, 67108923, buffer.get(), 1));
  ASSERT_EQ(1, WriteSparseData(entry1, 53, buffer.get(), 1));
  ASSERT_EQ(1, WriteSparseData(entry2, 0, buffer.get(), 1));

  // Closing these in a special order should not lead to buggy reentrant
  // eviction.
  entry1->Close();
  entry2->Close();
  entry0->Close();
}
4700 
// Verifies that DoomAllEntries on the in-memory backend correctly handles a
// sparse parent entry together with its child entries.
TEST_F(DiskCacheBackendTest, InMemorySparseDoom) {
  const int kMaxSize = 512;

  SetMaxSize(kMaxSize);
  SetMemoryOnlyMode();
  InitCache();

  scoped_refptr<net::IOBuffer> buffer = base::MakeRefCounted<net::IOBuffer>(64);
  CacheTestFillBuffer(buffer->data(), 64, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("http://www.0.com/", &entry), IsOk());

  // The write is expected to fail (it exceeds what the tiny cache allows),
  // but it still creates sparse child structures.
  ASSERT_EQ(net::ERR_FAILED, WriteSparseData(entry, 4337, buffer.get(), 64));
  entry->Close();

  // Dooming all entries at this point should properly iterate over
  // the parent and its children
  DoomAllEntries();
}
4721 
// The blockfile backend caps its maximum size at INT32_MAX bytes: exactly
// that value is accepted, one byte more is rejected.
TEST_F(DiskCacheBackendTest, BlockFileMaxSizeLimit) {
  InitCache();

  const int64_t kInt32Limit = std::numeric_limits<int32_t>::max();
  SetMaxSize(kInt32Limit, /*should_succeed=*/true);
  SetMaxSize(kInt32Limit + 1, /*should_succeed=*/false);
}
4731 
// The in-memory backend caps its maximum size at INT32_MAX bytes: exactly
// that value is accepted, one byte more is rejected.
TEST_F(DiskCacheBackendTest, InMemoryMaxSizeLimit) {
  SetMemoryOnlyMode();
  InitCache();

  const int64_t kInt32Limit = std::numeric_limits<int32_t>::max();
  SetMaxSize(kInt32Limit, /*should_succeed=*/true);
  SetMaxSize(kInt32Limit + 1, /*should_succeed=*/false);
}
4742 
// Unlike the blockfile and in-memory backends, the Simple Cache accepts
// maximum sizes beyond INT32_MAX bytes.
TEST_F(DiskCacheBackendTest, SimpleMaxSizeLimit) {
  SetSimpleCacheMode();
  InitCache();

  const int64_t kInt32Limit = std::numeric_limits<int32_t>::max();
  SetMaxSize(kInt32Limit, /*should_succeed=*/true);
  SetMaxSize(kInt32Limit + 1, /*should_succeed=*/true);
}
4753 
// Shared body for the OpenOrCreateEntry tests: verifies create-vs-open
// semantics, post-doom creation, and callback cancellation on backend
// destruction.
void DiskCacheBackendTest::BackendOpenOrCreateEntry() {
  // Avoid the weird kNoRandom flag on blockfile, since this needs to
  // test cleanup behavior actually used in production.
  if (memory_only_) {
    InitCache();
  } else {
    CleanupCacheDir();
    // Since we're not forcing a clean shutdown, integrity check may fail.
    DisableIntegrityCheck();
    CreateBackend(disk_cache::kNone);
  }

  // Test that new key is created.
  disk_cache::EntryResult es1 = OpenOrCreateEntry("first");
  ASSERT_THAT(es1.net_error(), IsOk());
  // opened() == false means the entry was created, not opened.
  ASSERT_FALSE(es1.opened());
  disk_cache::Entry* e1 = es1.ReleaseEntry();
  ASSERT_TRUE(nullptr != e1);

  // Test that existing key is opened and its entry matches.
  disk_cache::EntryResult es2 = OpenOrCreateEntry("first");
  ASSERT_THAT(es2.net_error(), IsOk());
  ASSERT_TRUE(es2.opened());
  disk_cache::Entry* e2 = es2.ReleaseEntry();
  ASSERT_TRUE(nullptr != e2);
  ASSERT_EQ(e1, e2);

  // Test that different keys' entries are not the same.
  disk_cache::EntryResult es3 = OpenOrCreateEntry("second");
  ASSERT_THAT(es3.net_error(), IsOk());
  ASSERT_FALSE(es3.opened());
  disk_cache::Entry* e3 = es3.ReleaseEntry();
  ASSERT_TRUE(nullptr != e3);
  ASSERT_NE(e3, e1);

  // Test that a new entry can be created with the same key as a doomed entry.
  e3->Doom();
  disk_cache::EntryResult es4 = OpenOrCreateEntry("second");
  ASSERT_THAT(es4.net_error(), IsOk());
  ASSERT_FALSE(es4.opened());
  disk_cache::Entry* e4 = es4.ReleaseEntry();
  ASSERT_TRUE(nullptr != e4);
  ASSERT_NE(e4, e3);

  // Verify the expected number of entries ("first" plus the re-created
  // "second"; the doomed one no longer counts).
  ASSERT_EQ(2, cache_->GetEntryCount());

  e1->Close();
  e2->Close();
  e3->Close();
  e4->Close();

  // Test proper cancellation of callback. In-memory cache
  // is always synchronous, so this isn't meaningful for it.
  if (!memory_only_) {
    TestEntryResultCompletionCallback callback;

    // Using "first" here:
    // 1) It's an existing entry, so SimpleCache can't cheat with an optimistic
    //    create.
    // 2) "second"'s creation is a cheated post-doom create one, which also
    //    makes testing trickier.
    EntryResult result =
        cache_->OpenOrCreateEntry("first", net::HIGHEST, callback.callback());
    ASSERT_EQ(net::ERR_IO_PENDING, result.net_error());
    // Destroy the backend while the operation is still pending.
    cache_ = nullptr;

    // Callback is supposed to be cancelled, so have to flush everything
    // to check for any trouble.
    disk_cache::FlushCacheThreadForTesting();
    RunUntilIdle();
    EXPECT_FALSE(callback.have_result());
  }
}
4828 
// Runs the shared OpenOrCreateEntry test against the in-memory backend.
TEST_F(DiskCacheBackendTest, InMemoryOnlyOpenOrCreateEntry) {
  SetMemoryOnlyMode();
  BackendOpenOrCreateEntry();
}
4833 
// Runs the shared OpenOrCreateEntry test against the blockfile backend.
TEST_F(DiskCacheBackendTest, MAYBE_BlockFileOpenOrCreateEntry) {
  BackendOpenOrCreateEntry();
}
4837 
// Runs the shared OpenOrCreateEntry test against the Simple Cache backend.
TEST_F(DiskCacheBackendTest, MAYBE_SimpleOpenOrCreateEntry) {
  SetSimpleCacheMode();
  BackendOpenOrCreateEntry();
}
4842 
// Shared body: an iterator must fail gracefully (not crash) when its backend
// has already been destroyed.
void DiskCacheBackendTest::BackendDeadOpenNextEntry() {
  InitCache();
  std::unique_ptr<disk_cache::Backend::Iterator> iter =
      cache_->CreateIterator();
  // Destroy the backend while |iter| is still alive.
  ResetCaches();
  EntryResult result = iter->OpenNextEntry(base::DoNothing());
  ASSERT_EQ(net::ERR_FAILED, result.net_error());
}
4851 
// Runs the dead-backend iterator test against the blockfile backend.
TEST_F(DiskCacheBackendTest, BlockFileBackendDeadOpenNextEntry) {
  BackendDeadOpenNextEntry();
}
4855 
// Runs the dead-backend iterator test against the Simple Cache backend.
TEST_F(DiskCacheBackendTest, SimpleBackendDeadOpenNextEntry) {
  SetSimpleCacheMode();
  BackendDeadOpenNextEntry();
}
4860 
// Runs the dead-backend iterator test against the in-memory backend.
TEST_F(DiskCacheBackendTest, InMemorySimpleBackendDeadOpenNextEntry) {
  SetMemoryOnlyMode();
  BackendDeadOpenNextEntry();
}
4865 
// Shared body: DoomAllEntries while an iterator is mid-enumeration must not
// crash; the subsequent OpenNextEntry may legitimately succeed or fail.
void DiskCacheBackendTest::BackendIteratorConcurrentDoom() {
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;
  EXPECT_EQ(net::OK, CreateEntry("Key0", &entry1));
  EXPECT_EQ(net::OK, CreateEntry("Key1", &entry2));

  std::unique_ptr<disk_cache::Backend::Iterator> iter =
      cache_->CreateIterator();

  disk_cache::Entry* entry3 = nullptr;
  EXPECT_EQ(net::OK, OpenEntry("Key0", &entry3));

  // Advance the iterator by one entry before dooming everything.
  TestEntryResultCompletionCallback cb;
  EntryResult result_iter = iter->OpenNextEntry(cb.callback());
  result_iter = cb.GetResult(std::move(result_iter));
  EXPECT_EQ(net::OK, result_iter.net_error());

  net::TestCompletionCallback cb_doom;
  int rv_doom = cache_->DoomAllEntries(cb_doom.callback());
  EXPECT_EQ(net::OK, cb_doom.GetResult(rv_doom));

  TestEntryResultCompletionCallback cb2;
  EntryResult result_iter2 = iter->OpenNextEntry(cb2.callback());
  result_iter2 = cb2.GetResult(std::move(result_iter2));

  // Either outcome is acceptable after the doom; the test only requires no
  // crash or hang.
  EXPECT_TRUE(result_iter2.net_error() == net::ERR_FAILED ||
              result_iter2.net_error() == net::OK);

  entry1->Close();
  entry2->Close();
  entry3->Close();
}
4898 
// Concurrent iterator/doom coverage for the blockfile backend.
TEST_F(DiskCacheBackendTest, BlockFileIteratorConcurrentDoom) {
  // Init in normal mode, bug not reproducible with kNoRandom. Still need to
  // let the test fixture know the new eviction algorithm will be on.
  CleanupCacheDir();
  SetNewEviction();
  CreateBackend(disk_cache::kNone);
  BackendIteratorConcurrentDoom();
}
4907 
// Concurrent iterator/doom coverage for the simple cache backend.
TEST_F(DiskCacheBackendTest, SimpleIteratorConcurrentDoom) {
  SetSimpleCacheMode();
  InitCache();
  BackendIteratorConcurrentDoom();
}
4913 
// Concurrent iterator/doom coverage for the in-memory backend.
TEST_F(DiskCacheBackendTest, InMemoryConcurrentDoom) {
  SetMemoryOnlyMode();
  InitCache();
  BackendIteratorConcurrentDoom();
}
4919 
// A corrupt fake index in an otherwise empty simple-cache directory must not
// block backend creation: the backend is expected to recover.
TEST_F(DiskCacheBackendTest, EmptyCorruptSimpleCacheRecovery) {
  SetSimpleCacheMode();

  // Plant a bogus "index" file in the (otherwise empty) cache directory.
  ASSERT_TRUE(base::PathExists(cache_path_));
  const base::FilePath fake_index = cache_path_.AppendASCII("index");
  ASSERT_TRUE(base::WriteFile(fake_index, "corrupted"));

  // Backend creation should succeed despite the corruption.
  TestBackendResultCompletionCallback callback;
  disk_cache::BackendResult result = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, callback.callback());
  result = callback.GetResult(std::move(result));
  EXPECT_THAT(result.net_error, IsOk());
}
4940 
// With entry files present, a corrupt fake index must NOT be silently
// recovered from: backend creation is expected to fail with ERR_FAILED.
TEST_F(DiskCacheBackendTest, MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover) {
  SetSimpleCacheMode();
  // Populate the cache with at least one entry first.
  BackendOpenOrCreateEntry();

  // Overwrite the fake index of the now-populated cache with garbage.
  ASSERT_TRUE(base::PathExists(cache_path_));
  const base::FilePath fake_index = cache_path_.AppendASCII("index");
  ASSERT_TRUE(base::WriteFile(fake_index, "corrupted"));

  TestBackendResultCompletionCallback callback;
  disk_cache::BackendResult result = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, callback.callback());
  result = callback.GetResult(std::move(result));
  EXPECT_THAT(result.net_error, IsError(net::ERR_FAILED));
}
4962 
TEST_F(DiskCacheBackendTest, SimpleOwnershipTransferBackendDestroyRace) {
  // Marker object whose destructor records that the bound callback state was
  // released, even though the callback itself never runs.
  struct CleanupContext {
    explicit CleanupContext(bool* ran_ptr) : ran_ptr(ran_ptr) {}
    ~CleanupContext() {
      *ran_ptr = true;
    }

    raw_ptr<bool> ran_ptr;
  };

  const char kKey[] = "skeleton";

  // This test was for a fix for see https://crbug.com/946349, but the mechanics
  // of that failure became impossible after a follow up API refactor. Still,
  // the timing is strange, and warrant coverage; in particular this tests what
  // happen if the SimpleBackendImpl is destroyed after SimpleEntryImpl
  // decides to return an entry to the caller, but before the callback is run.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  // Make sure create actually succeeds, not just optimistically.
  RunUntilIdle();

  bool cleanup_context_ran = false;
  auto cleanup_context = std::make_unique<CleanupContext>(&cleanup_context_ran);

  // The OpenEntry code below will find a pre-existing entry in a READY state,
  // so it will immediately post a task to return a result. Destroying the
  // backend before running the event loop again will run that callback in the
  // dead-backend state, while OpenEntry completion was still with it alive.

  EntryResult result = cache_->OpenEntry(
      kKey, net::HIGHEST,
      base::BindOnce(
          [](std::unique_ptr<CleanupContext>, EntryResult result) {
            // The callback is here for ownership of CleanupContext,
            // and it shouldn't get invoked in this test. Normal
            // one would transfer result.entry to CleanupContext.
            ADD_FAILURE() << "This should not actually run";

            // ... but if it ran, it also shouldn't see the pointer.
            EXPECT_EQ(nullptr, result.ReleaseEntry());
          },
          std::move(cleanup_context)));
  EXPECT_EQ(net::ERR_IO_PENDING, result.net_error());
  // Destroy the backend while the open-completion task is still pending.
  ResetCaches();

  // Give CleanupContext a chance to do its thing.
  RunUntilIdle();
  EXPECT_TRUE(cleanup_context_ran);

  // Closing the entry after the backend is gone should still be safe.
  entry->Close();
}
5018 
5019 // Verify that reloading the cache will preserve indices in kNeverReset mode.
TEST_F(DiskCacheBackendTest, SimpleCacheSoftResetKeepsValues) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  {  // Do the initial cache creation then delete the values.
    TestBackendResultCompletionCallback cb;

    // Create an initial back-end and wait for indexing
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // Create an entry in the cache
    CreateKeyAndCheck(cache.get(), "key");
  }  // First backend is destroyed at end of scope.

  // Let any backend-shutdown work finish before re-creating the cache.
  RunUntilIdle();

  {  // Do the second cache creation with no reset flag, preserving entries.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // The entry should be present, as a forced reset was not called for.
    EXPECT_TRUE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                    ->index()
                    ->Has(disk_cache::simple_util::GetEntryHashKey("key")));
  }
}
5065 
5066 // Verify that reloading the cache will not preserve indices in Reset mode.
TEST_F(DiskCacheBackendTest, SimpleCacheHardResetDropsValues) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  {  // Create the initial back-end.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // Create an entry in the cache.
    CreateKeyAndCheck(cache.get(), "key");
  }  // First backend is destroyed at end of scope.

  // Let any backend-shutdown work finish before re-creating the cache.
  RunUntilIdle();

  {  // Re-load cache with a reset flag, which should ignore existing entries.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kReset, /*net_log=*/nullptr,
        cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // The entry shouldn't be present, as a forced reset was called for.
    EXPECT_FALSE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                     ->index()
                     ->Has(disk_cache::simple_util::GetEntryHashKey("key")));

    // Add the entry back in the cache, then make sure it's present.
    CreateKeyAndCheck(cache.get(), "key");

    EXPECT_TRUE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                    ->index()
                    ->Has(disk_cache::simple_util::GetEntryHashKey("key")));
  }
}
5118 
5119 // Test to make sure cancelation of backend operation that got queued after
5120 // a pending doom on backend destruction happens properly.
TEST_F(DiskCacheBackendTest, SimpleCancelOpPendingDoom) {
  // Marker object whose destructor records that the bound callback state was
  // released, even though the callback itself never runs.
  struct CleanupContext {
    explicit CleanupContext(bool* ran_ptr) : ran_ptr(ran_ptr) {}
    ~CleanupContext() { *ran_ptr = true; }

    raw_ptr<bool> ran_ptr;
  };

  const char kKey[] = "skeleton";

  // Disable optimistic ops.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  entry->Close();

  // Queue doom.
  cache_->DoomEntry(kKey, net::LOWEST, base::DoNothing());

  // Queue create after it.
  bool cleanup_context_ran = false;
  auto cleanup_context = std::make_unique<CleanupContext>(&cleanup_context_ran);

  EntryResult entry_result = cache_->CreateEntry(
      kKey, net::HIGHEST,
      base::BindOnce(
          [](std::unique_ptr<CleanupContext>, EntryResult result) {
            // The create must be cancelled on backend destruction, so this
            // callback must never be invoked...
            ADD_FAILURE() << "This should not actually run";
          },
          std::move(cleanup_context)));

  EXPECT_EQ(net::ERR_IO_PENDING, entry_result.net_error());
  // Destroy the backend while both the doom and the create are queued.
  ResetCaches();

  // ...but its bound state must still be destroyed (no leak).
  RunUntilIdle();
  EXPECT_TRUE(cleanup_context_ran);
}
5161 
TEST_F(DiskCacheBackendTest, SimpleDontLeakPostDoomCreate) {
  // If an entry has been optimistically created after a pending doom, and the
  // backend destroyed before the doom completed, the entry would get wedged,
  // with no operations on it workable and entry leaked.
  // (See https://crbug.com/1015774).
  const char kKey[] = "for_lock";
  const int kBufSize = 2 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  entry->Close();

  // Make sure create actually succeeds, not just optimistically.
  RunUntilIdle();

  // Queue doom.
  int rv = cache_->DoomEntry(kKey, net::LOWEST, base::DoNothing());
  ASSERT_EQ(net::ERR_IO_PENDING, rv);

  // And then do a create. This actually succeeds optimistically.
  EntryResult result =
      cache_->CreateEntry(kKey, net::LOWEST, base::DoNothing());
  ASSERT_EQ(net::OK, result.net_error());
  entry = result.ReleaseEntry();

  // Destroy the backend before the queued doom has completed.
  ResetCaches();

  // Entry is still supposed to be operable. This part is needed to see the bug
  // without a leak checker.
  EXPECT_EQ(kBufSize, WriteData(entry, 1, 0, buffer.get(), kBufSize, false));

  entry->Close();

  // Should not have leaked files here.
}
5203 
TEST_F(DiskCacheBackendTest, BlockFileDelayedWriteFailureRecovery) {
  // Test that blockfile recovers appropriately when some entries are
  // in a screwed up state due to an error in delayed writeback.
  //
  // https://crbug.com/1086727
  InitCache();

  const char kKey[] = "Key2";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  const int kBufSize = 24320;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  // Buffer a large sparse write; it will be flushed lazily later on.
  ASSERT_EQ(kBufSize, WriteSparseData(entry, 0, buffer.get(), kBufSize));

  // Setting the size limit artificially low injects a failure on writing back
  // data buffered above.
  SetMaxSize(4096);

  // This causes SparseControl to close the child entry corresponding to
  // low portion of offset space, triggering the writeback --- which fails
  // due to the space cap, and in particular fails to allocate data for
  // a stream, so it gets address 0.
  ASSERT_EQ(net::ERR_FAILED, WriteSparseData(entry, 16773118, buffer.get(), 4));

  // Now try reading the broken child. This should report an error, not
  // DCHECK.
  ASSERT_EQ(net::ERR_FAILED, ReadSparseData(entry, 4, buffer.get(), 4));

  entry->Close();
}
5238 
TEST_F(DiskCacheBackendTest, BlockFileInsertAliasing) {
  // Test for not having rankings corruption due to aliasing between iterator
  // and other ranking list copies during insertion operations.
  //
  // https://crbug.com/1156288
  //
  // NOTE(review): the exact sequence of writes, iterator steps, and dooms
  // below reproduces the original crash; do not reorder.

  // Need to disable weird extra sync behavior to hit the bug.
  CreateBackend(disk_cache::kNone);
  SetNewEviction();  // default, but integrity check doesn't realize that.

  const char kKey[] = "Key0";
  const char kKeyA[] = "KeyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA41";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  const int kBufSize = 61188;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  // Issue two overlapping-in-time sparse writes at distant offsets.
  net::TestCompletionCallback cb_write64;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(8, buffer.get(), 64, cb_write64.callback()));

  net::TestCompletionCallback cb_write61k;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(16773118, buffer.get(), 61188,
                                   cb_write61k.callback()));

  EXPECT_EQ(64, cb_write64.WaitForResult());
  EXPECT_EQ(61188, cb_write61k.WaitForResult());

  EXPECT_EQ(4128, WriteSparseData(entry, 2147479550, buffer.get(), 4128));

  // Create the iterator, then keep writing so insertions happen while the
  // iterator holds its own copy of rankings data.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(4128, WriteSparseData(entry, 2147479550, buffer.get(), 4128));
  EXPECT_EQ(64, WriteSparseData(entry, 8, buffer.get(), 64));

  disk_cache::Entry* itEntry1 = nullptr;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&itEntry1));
  // These are actually child nodes for range.

  entry->Close();

  disk_cache::Entry* itEntry2 = nullptr;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&itEntry2));

  // Doom everything while iteration is in flight...
  net::TestCompletionCallback doom_cb;
  EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomAllEntries(doom_cb.callback()));

  // ...and race a create against the pending doom.
  TestEntryResultCompletionCallback cb_create1;
  disk_cache::EntryResult result =
      cache_->CreateEntry(kKey, net::HIGHEST, cb_create1.callback());
  EXPECT_EQ(net::OK, doom_cb.WaitForResult());
  result = cb_create1.WaitForResult();
  EXPECT_EQ(net::OK, result.net_error());
  entry = result.ReleaseEntry();

  disk_cache::Entry* entryA = nullptr;
  ASSERT_THAT(CreateEntry(kKeyA, &entryA), IsOk());
  entryA->Close();

  // EXPECT (not ASSERT): iteration past the doom may yield nothing, and the
  // cleanup below must still run.
  disk_cache::Entry* itEntry3 = nullptr;
  EXPECT_EQ(net::OK, iter->OpenNextEntry(&itEntry3));

  EXPECT_EQ(net::OK, DoomEntry(kKeyA));
  itEntry1->Close();
  entry->Close();
  itEntry2->Close();
  if (itEntry3)
    itEntry3->Close();
}
5311 
TEST_F(DiskCacheBackendTest, MemCacheBackwardsClock) {
  // Test to make sure that wall clock going backwards is tolerated.

  base::SimpleTestClock clock;
  clock.SetNow(base::Time::Now());

  SetMemoryOnlyMode();
  InitCache();
  mem_cache_->SetClockForTesting(&clock);

  const int kBufSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key1", &entry), IsOk());
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  // Jump the clock backwards, then write a second entry "in the past".
  clock.Advance(-base::Hours(1));

  ASSERT_THAT(CreateEntry("key2", &entry), IsOk());
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  // Both entries should still be visible to time-ranged size and doom
  // operations over the full time range.
  EXPECT_LE(2 * kBufSize,
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(net::OK, DoomEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(0, CalculateSizeOfAllEntries());

  // Detach the stack-allocated test clock before it goes out of scope.
  mem_cache_->SetClockForTesting(nullptr);
}
5346 
TEST_F(DiskCacheBackendTest, SimpleOpenOrCreateIndexError) {
  // Exercise behavior of OpenOrCreateEntry in SimpleCache where the index
  // incorrectly claims the entry is missing. Regression test for
  // https://crbug.com/1316034
  const char kKey[] = "http://example.org";

  const int kBufSize = 256;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  SetSimpleCacheMode();
  InitCache();

  // Create an entry.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  EXPECT_EQ(kBufSize, WriteData(entry, /*index=*/1, /*offset=*/0, buffer.get(),
                                /*len=*/kBufSize, /*truncate=*/false));
  entry->Close();

  // Mess up the index to say it's not there.
  simple_cache_impl_->index()->Remove(
      disk_cache::simple_util::GetEntryHashKey(kKey));

  // Reopening with OpenOrCreateEntry should still work.
  disk_cache::EntryResult result = OpenOrCreateEntry(kKey);
  ASSERT_THAT(result.net_error(), IsOk());
  ASSERT_TRUE(result.opened());
  entry = result.ReleaseEntry();
  // The original payload must still be there — the entry was opened, not
  // recreated.
  EXPECT_EQ(kBufSize, entry->GetDataSize(/*index=*/1));
  entry->Close();
}
5381 
TEST_F(DiskCacheBackendTest, SimpleOpenOrCreateIndexErrorOptimistic) {
  // Exercise behavior of OpenOrCreateEntry in SimpleCache where the index
  // incorrectly claims the entry is missing and we do an optimistic create.
  // Covers a codepath adjacent to the one that caused https://crbug.com/1316034
  const char kKey[] = "http://example.org";

  SetSimpleCacheMode();
  InitCache();

  const int kBufSize = 256;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  // Create an entry.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  EXPECT_EQ(kBufSize, WriteData(entry, /*index=*/1, /*offset=*/0, buffer.get(),
                                /*len=*/kBufSize, /*truncate=*/false));
  entry->Close();

  // Let all the I/O finish, so that OpenOrCreateEntry can try optimistic path.
  RunUntilIdle();

  // Mess up the index to say it's not there.
  simple_cache_impl_->index()->Remove(
      disk_cache::simple_util::GetEntryHashKey(kKey));

  // Reopening with OpenOrCreateEntry should still work, but since the backend
  // chose to be optimistic based on index, the result should be a fresh empty
  // entry.
  disk_cache::EntryResult result = OpenOrCreateEntry(kKey);
  ASSERT_THAT(result.net_error(), IsOk());
  ASSERT_FALSE(result.opened());
  entry = result.ReleaseEntry();
  EXPECT_EQ(0, entry->GetDataSize(/*index=*/1));
  entry->Close();
}
5420 
TEST_F(DiskCacheBackendTest, SimpleDoomAfterBackendDestruction) {
  // Test for when validating file headers/footers during close on simple
  // backend fails. To get the header to be checked on close, there needs to be
  // a stream 2, since 0/1 are validated on open, and no other operation must
  // have happened to stream 2, since those will force it, too. A way of getting
  // the validation to fail is to perform a doom on the file after the backend
  // is destroyed, since that will truncated the files to mark them invalid. See
  // https://crbug.com/1317884
  const char kKey[] = "Key0";

  const int kBufSize = 256;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  // SHADER_CACHE is used here; APP_CACHE/SHADER_CACHE configure the simple
  // backend differently from the default — TODO(review): confirm the exact
  // reason this type is required for the repro.
  SetCacheType(net::SHADER_CACHE);
  SetSimpleCacheMode();

  InitCache();
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  // Zero-length write at offset 1 touches stream 2 just enough to create it
  // without forcing validation later.
  EXPECT_EQ(0, WriteData(entry, /*index=*/2, /*offset=*/1, buffer.get(),
                         /*len=*/0, /*truncate=*/false));
  entry->Close();

  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());
  ResetCaches();

  // Doom with the backend gone truncates the files; Close() then runs the
  // header/footer validation that must fail gracefully.
  entry->Doom();
  entry->Close();
}
5453 
BackendValidateMigrated()5454 void DiskCacheBackendTest::BackendValidateMigrated() {
5455   // Blockfile 3.0 migration test.
5456   DisableFirstCleanup();  // started from copied dir, not cleaned dir.
5457   InitCache();
5458 
5459   // The total size comes straight from the headers, and is expected to be 1258
5460   // for either set of testdata.
5461   EXPECT_EQ(1258, CalculateSizeOfAllEntries());
5462   EXPECT_EQ(1, cache_->GetEntryCount());
5463 
5464   disk_cache::Entry* entry = nullptr;
5465   ASSERT_THAT(OpenEntry("https://example.org/data", &entry), IsOk());
5466 
5467   // Size of the actual payload.
5468   EXPECT_EQ(1234, entry->GetDataSize(1));
5469 
5470   entry->Close();
5471 }
5472 
// Migration from blockfile 2.0 testdata, default eviction.
TEST_F(DiskCacheBackendTest, BlockfileMigrate20) {
  ASSERT_TRUE(CopyTestCache("good_2_0"));
  BackendValidateMigrated();
}
5477 
// Migration from blockfile 2.1 testdata, default eviction.
TEST_F(DiskCacheBackendTest, BlockfileMigrate21) {
  ASSERT_TRUE(CopyTestCache("good_2_1"));
  BackendValidateMigrated();
}
5482 
// Migration from blockfile 2.0 testdata with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, BlockfileMigrateNewEviction20) {
  ASSERT_TRUE(CopyTestCache("good_2_0"));
  SetNewEviction();
  BackendValidateMigrated();
}
5488 
// Migration from blockfile 2.1 testdata with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, BlockfileMigrateNewEviction21) {
  ASSERT_TRUE(CopyTestCache("good_2_1"));
  SetNewEviction();
  BackendValidateMigrated();
}
5494