1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/basictypes.h"
6 #include "base/file_util.h"
7 #include "base/metrics/field_trial.h"
8 #include "base/port.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
12 #include "base/threading/platform_thread.h"
13 #include "base/threading/thread_restrictions.h"
14 #include "net/base/cache_type.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/net_errors.h"
17 #include "net/base/test_completion_callback.h"
18 #include "net/disk_cache/blockfile/backend_impl.h"
19 #include "net/disk_cache/blockfile/entry_impl.h"
20 #include "net/disk_cache/blockfile/experiments.h"
21 #include "net/disk_cache/blockfile/histogram_macros.h"
22 #include "net/disk_cache/blockfile/mapped_file.h"
23 #include "net/disk_cache/cache_util.h"
24 #include "net/disk_cache/disk_cache_test_base.h"
25 #include "net/disk_cache/disk_cache_test_util.h"
26 #include "net/disk_cache/memory/mem_backend_impl.h"
27 #include "net/disk_cache/simple/simple_backend_impl.h"
28 #include "net/disk_cache/simple/simple_entry_format.h"
29 #include "net/disk_cache/simple/simple_test_util.h"
30 #include "net/disk_cache/simple/simple_util.h"
31 #include "net/disk_cache/tracing/tracing_cache_backend.h"
32 #include "testing/gtest/include/gtest/gtest.h"
33 
34 #if defined(OS_WIN)
35 #include "base/win/scoped_handle.h"
36 #endif
37 
38 // Provide a BackendImpl object to macros from histogram_macros.h.
39 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
40 
41 using base::Time;
42 
43 namespace {
44 
45 const char kExistingEntryKey[] = "existing entry key";
46 
47 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
48     const base::Thread& cache_thread,
49     base::FilePath& cache_path) {
50   net::TestCompletionCallback cb;
51 
52   scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
53       cache_path, cache_thread.message_loop_proxy(), NULL));
54   int rv = cache->Init(cb.callback());
55   if (cb.GetResult(rv) != net::OK)
56     return scoped_ptr<disk_cache::BackendImpl>();
57 
58   disk_cache::Entry* entry = NULL;
59   rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
60   if (cb.GetResult(rv) != net::OK)
61     return scoped_ptr<disk_cache::BackendImpl>();
62   entry->Close();
63 
64   return cache.Pass();
65 }
66 
67 }  // namespace
68 
69 // Tests that can run with different types of caches.
70 class DiskCacheBackendTest : public DiskCacheTestWithCache {
71  protected:
72   // Some utility methods:
73 
74   // Perform IO operations on the cache until there is pending IO.
75   int GeneratePendingIO(net::TestCompletionCallback* cb);
76 
77   // Adds 5 sparse entries. |doomed_start| and |doomed_end|, if not NULL, are
78   // filled with times usable by DoomEntriesSince and DoomEntriesBetween.
79   // There are 4 entries after |doomed_start| and 2 after |doomed_end|.
80   void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
81 
82   bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
83   bool EnumerateAndMatchKeys(int max_to_open,
84                              void** iter,
85                              std::set<std::string>* keys_to_match,
86                              size_t* count);
87 
88   // Actual tests:
89   void BackendBasics();
90   void BackendKeying();
91   void BackendShutdownWithPendingFileIO(bool fast);
92   void BackendShutdownWithPendingIO(bool fast);
93   void BackendShutdownWithPendingCreate(bool fast);
94   void BackendSetSize();
95   void BackendLoad();
96   void BackendChain();
97   void BackendValidEntry();
98   void BackendInvalidEntry();
99   void BackendInvalidEntryRead();
100   void BackendInvalidEntryWithLoad();
101   void BackendTrimInvalidEntry();
102   void BackendTrimInvalidEntry2();
103   void BackendEnumerations();
104   void BackendEnumerations2();
105   void BackendInvalidEntryEnumeration();
106   void BackendFixEnumerators();
107   void BackendDoomRecent();
108   void BackendDoomBetween();
109   void BackendTransaction(const std::string& name, int num_entries, bool load);
110   void BackendRecoverInsert();
111   void BackendRecoverRemove();
112   void BackendRecoverWithEviction();
113   void BackendInvalidEntry2();
114   void BackendInvalidEntry3();
115   void BackendInvalidEntry7();
116   void BackendInvalidEntry8();
117   void BackendInvalidEntry9(bool eviction);
118   void BackendInvalidEntry10(bool eviction);
119   void BackendInvalidEntry11(bool eviction);
120   void BackendTrimInvalidEntry12();
121   void BackendDoomAll();
122   void BackendDoomAll2();
123   void BackendInvalidRankings();
124   void BackendInvalidRankings2();
125   void BackendDisable();
126   void BackendDisable2();
127   void BackendDisable3();
128   void BackendDisable4();
129   void TracingBackendBasics();
130 };
131 
132 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
133   if (!use_current_thread_) {
134     ADD_FAILURE();
135     return net::ERR_FAILED;
136   }
137 
138   disk_cache::Entry* entry;
139   int rv = cache_->CreateEntry("some key", &entry, cb->callback());
140   if (cb->GetResult(rv) != net::OK)
141     return net::ERR_CACHE_CREATE_FAILURE;
142 
143   const int kSize = 25000;
144   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
145   CacheTestFillBuffer(buffer->data(), kSize, false);
146 
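  // Keep issuing 25000-byte writes at 64 KiB offsets across a 10 MB range until
  // one of them returns net::ERR_IO_PENDING.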
147   for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
148     // We are using the current thread as the cache thread because we want to
149     // be able to call this method directly, to make sure that it is the OS
150     // (rather than a thread switch on our part) that returns IO pending.
151     if (!simple_cache_mode_) {
152       rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
153           0, i, buffer.get(), kSize, cb->callback(), false);
154     } else {
155       rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
156     }
157 
158     if (rv == net::ERR_IO_PENDING)
159       break;
160     if (rv != kSize)
161       rv = net::ERR_FAILED;
162   }
163 
164   // Don't call Close() to avoid going through the queue or we'll deadlock
165   // waiting for the operation to finish.
166   if (!simple_cache_mode_)
167     static_cast<disk_cache::EntryImpl*>(entry)->Release();
168   else
169     entry->Close();
170 
171   return rv;
172 }
173 
174 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
175                                            base::Time* doomed_end) {
176   InitCache();
177 
178   const int kSize = 50;
179   // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
180   const int kOffset = 10 + 1024 * 1024;
181 
182   disk_cache::Entry* entry0 = NULL;
183   disk_cache::Entry* entry1 = NULL;
184   disk_cache::Entry* entry2 = NULL;
185 
186   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
187   CacheTestFillBuffer(buffer->data(), kSize, false);
188 
189   ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
190   ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
191   ASSERT_EQ(kSize,
192             WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
193   entry0->Close();
194 
195   FlushQueueForTest();
196   AddDelay();
197   if (doomed_start)
198     *doomed_start = base::Time::Now();
199 
200   // Order in rankings list:
201   // first_part1, first_part2, second_part1, second_part2
202   ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
203   ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
204   ASSERT_EQ(kSize,
205             WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
206   entry1->Close();
207 
208   ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
209   ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
210   ASSERT_EQ(kSize,
211             WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
212   entry2->Close();
213 
214   FlushQueueForTest();
215   AddDelay();
216   if (doomed_end)
217     *doomed_end = base::Time::Now();
218 
219   // Order in rankings list:
220   // third_part1, fourth_part1, third_part2, fourth_part2
221   disk_cache::Entry* entry3 = NULL;
222   disk_cache::Entry* entry4 = NULL;
223   ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
224   ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
225   ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
226   ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
227   ASSERT_EQ(kSize,
228             WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
229   ASSERT_EQ(kSize,
230             WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
231   entry3->Close();
232   entry4->Close();
233 
234   FlushQueueForTest();
235   AddDelay();
236 }
237 
238 // Creates entries based on random keys. Stores these keys in |key_pool|.
239 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
240     std::set<std::string>* key_pool) {
241   const int kNumEntries = 10;
242 
243   for (int i = 0; i < kNumEntries; ++i) {
244     std::string key = GenerateKey(true);
245     disk_cache::Entry* entry;
246     if (CreateEntry(key, &entry) != net::OK)
247       return false;
248     key_pool->insert(key);
249     entry->Close();
250   }
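  // Sanity check: the backend should report exactly as many entries as keys we
  // collected above.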
251   return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
252 }
253 
254 // Performs iteration over the backend and checks that the keys of entries
255 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
256 // will be opened, if it is positive. Otherwise, iteration will continue until
257 // OpenNextEntry stops returning net::OK.
258 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
259     int max_to_open,
260     void** iter,
261     std::set<std::string>* keys_to_match,
262     size_t* count) {
263   disk_cache::Entry* entry;
264 
265   while (OpenNextEntry(iter, &entry) == net::OK) {
266     if (!entry)
267       return false;
268     EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
269     entry->Close();
270     ++(*count);
271     if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
272       break;
273   };
274 
275   return true;
276 }
277 
278 void DiskCacheBackendTest::BackendBasics() {
279   InitCache();
280   disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
281   EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
282   ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
283   ASSERT_TRUE(NULL != entry1);
284   entry1->Close();
285   entry1 = NULL;
286 
287   ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
288   ASSERT_TRUE(NULL != entry1);
289   entry1->Close();
290   entry1 = NULL;
291 
292   EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
293   ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
294   EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
295   ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
296   ASSERT_TRUE(NULL != entry1);
297   ASSERT_TRUE(NULL != entry2);
298   EXPECT_EQ(2, cache_->GetEntryCount());
299 
300   disk_cache::Entry* entry3 = NULL;
301   ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
302   ASSERT_TRUE(NULL != entry3);
303   EXPECT_TRUE(entry2 == entry3);
304   EXPECT_EQ(2, cache_->GetEntryCount());
305 
306   EXPECT_EQ(net::OK, DoomEntry("some other key"));
307   EXPECT_EQ(1, cache_->GetEntryCount());
308   entry1->Close();
309   entry2->Close();
310   entry3->Close();
311 
312   EXPECT_EQ(net::OK, DoomEntry("the first key"));
313   EXPECT_EQ(0, cache_->GetEntryCount());
314 
315   ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
316   ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
317   entry1->Doom();
318   entry1->Close();
319   EXPECT_EQ(net::OK, DoomEntry("some other key"));
320   EXPECT_EQ(0, cache_->GetEntryCount());
321   entry2->Close();
322 }
323 
324 TEST_F(DiskCacheBackendTest, Basics) {
325   BackendBasics();
326 }
327 
328 TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
329   SetNewEviction();
330   BackendBasics();
331 }
332 
333 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
334   SetMemoryOnlyMode();
335   BackendBasics();
336 }
337 
338 TEST_F(DiskCacheBackendTest, AppCacheBasics) {
339   SetCacheType(net::APP_CACHE);
340   BackendBasics();
341 }
342 
343 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
344   SetCacheType(net::SHADER_CACHE);
345   BackendBasics();
346 }
347 
348 void DiskCacheBackendTest::BackendKeying() {
349   InitCache();
350   const char* kName1 = "the first key";
351   const char* kName2 = "the first Key";
352   disk_cache::Entry *entry1, *entry2;
353   ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
354 
355   ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
356   EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
357   entry2->Close();
358 
359   char buffer[30];
360   base::strlcpy(buffer, kName1, arraysize(buffer));
361   ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
362   EXPECT_TRUE(entry1 == entry2);
363   entry2->Close();
364 
365   base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
366   ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
367   EXPECT_TRUE(entry1 == entry2);
368   entry2->Close();
369 
370   base::strlcpy(buffer + 3,  kName1, arraysize(buffer) - 3);
371   ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
372   EXPECT_TRUE(entry1 == entry2);
373   entry2->Close();
374 
375   // Now verify long keys.
376   char buffer2[20000];
377   memset(buffer2, 's', sizeof(buffer2));
378   buffer2[1023] = '\0';
379   ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
380   entry2->Close();
381 
382   buffer2[1023] = 'g';
383   buffer2[19999] = '\0';
384   ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
385   entry2->Close();
386   entry1->Close();
387 }
388 
389 TEST_F(DiskCacheBackendTest, Keying) {
390   BackendKeying();
391 }
392 
393 TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
394   SetNewEviction();
395   BackendKeying();
396 }
397 
398 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
399   SetMemoryOnlyMode();
400   BackendKeying();
401 }
402 
403 TEST_F(DiskCacheBackendTest, AppCacheKeying) {
404   SetCacheType(net::APP_CACHE);
405   BackendKeying();
406 }
407 
408 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
409   SetCacheType(net::SHADER_CACHE);
410   BackendKeying();
411 }
412 
413 TEST_F(DiskCacheTest, CreateBackend) {
414   net::TestCompletionCallback cb;
415 
416   {
417     ASSERT_TRUE(CleanupCacheDir());
418     base::Thread cache_thread("CacheThread");
419     ASSERT_TRUE(cache_thread.StartWithOptions(
420         base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
421 
422     // Test the private factory method(s).
423     scoped_ptr<disk_cache::Backend> cache;
424     cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
425     ASSERT_TRUE(cache.get());
426     cache.reset();
427 
428     // Now test the public API.
429     int rv =
430         disk_cache::CreateCacheBackend(net::DISK_CACHE,
431                                        net::CACHE_BACKEND_DEFAULT,
432                                        cache_path_,
433                                        0,
434                                        false,
435                                        cache_thread.message_loop_proxy().get(),
436                                        NULL,
437                                        &cache,
438                                        cb.callback());
439     ASSERT_EQ(net::OK, cb.GetResult(rv));
440     ASSERT_TRUE(cache.get());
441     cache.reset();
442 
443     rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
444                                         net::CACHE_BACKEND_DEFAULT,
445                                         base::FilePath(), 0,
446                                         false, NULL, NULL, &cache,
447                                         cb.callback());
448     ASSERT_EQ(net::OK, cb.GetResult(rv));
449     ASSERT_TRUE(cache.get());
450     cache.reset();
451   }
452 
453   base::MessageLoop::current()->RunUntilIdle();
454 }
455 
456 // Tests that |BackendImpl| fails to initialize with a missing file.
457 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
458   ASSERT_TRUE(CopyTestCache("bad_entry"));
459   base::FilePath filename = cache_path_.AppendASCII("data_1");
460   base::DeleteFile(filename, false);
461   base::Thread cache_thread("CacheThread");
462   ASSERT_TRUE(cache_thread.StartWithOptions(
463       base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
464   net::TestCompletionCallback cb;
465 
466   bool prev = base::ThreadRestrictions::SetIOAllowed(false);
467   scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
468       cache_path_, cache_thread.message_loop_proxy().get(), NULL));
469   int rv = cache->Init(cb.callback());
470   EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
471   base::ThreadRestrictions::SetIOAllowed(prev);
472 
473   cache.reset();
474   DisableIntegrityCheck();
475 }
476 
477 TEST_F(DiskCacheBackendTest, ExternalFiles) {
478   InitCache();
480   // First, let's create a file in the cache folder.
480   base::FilePath filename = cache_path_.AppendASCII("f_000001");
481 
482   const int kSize = 50;
483   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
484   CacheTestFillBuffer(buffer1->data(), kSize, false);
485   ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));
486 
487   // Now let's create a file with the cache.
488   disk_cache::Entry* entry;
489   ASSERT_EQ(net::OK, CreateEntry("key", &entry));
490   ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
491   entry->Close();
492 
493   // And verify that the first file is still there.
494   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
495   ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
496   EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
497 }
498 
499 // Tests that we deal with file-level pending operations at destruction time.
500 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
501   ASSERT_TRUE(CleanupCacheDir());
502   uint32 flags = disk_cache::kNoBuffering;
503   if (!fast)
504     flags |= disk_cache::kNoRandom;
505 
506   UseCurrentThread();
507   CreateBackend(flags, NULL);
508 
509   net::TestCompletionCallback cb;
510   int rv = GeneratePendingIO(&cb);
511 
512   // The cache destructor will see one pending operation here.
513   cache_.reset();
514 
515   if (rv == net::ERR_IO_PENDING) {
516     if (fast || simple_cache_mode_)
517       EXPECT_FALSE(cb.have_result());
518     else
519       EXPECT_TRUE(cb.have_result());
520   }
521 
522   base::MessageLoop::current()->RunUntilIdle();
523 
524 #if !defined(OS_IOS)
525   // Wait for the actual operation to complete, or we'll keep a file handle that
526   // may cause issues later. Note that on iOS, even though this test uses a
527   // single thread, the actual IO is posted to a worker thread and the cache
528   // destructor breaks the link to reach |cb| when the operation completes.
529   rv = cb.GetResult(rv);
530 #endif
531 }
532 
533 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
534   BackendShutdownWithPendingFileIO(false);
535 }
536 
537 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
538 // builds because they contain a lot of intentional memory leaks.
539 // The wrapper scripts used to run tests under Valgrind Memcheck will also
540 // disable these tests. See:
541 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
542 #if !defined(LEAK_SANITIZER)
543 // We'll be leaking from this test.
544 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
545   // The integrity test sets kNoRandom so there's a version mismatch if we don't
546   // force new eviction.
547   SetNewEviction();
548   BackendShutdownWithPendingFileIO(true);
549 }
550 #endif
551 
552 // See crbug.com/330074
553 #if !defined(OS_IOS)
554 // Tests that one cache instance is not affected by another one going away.
555 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
556   base::ScopedTempDir store;
557   ASSERT_TRUE(store.CreateUniqueTempDir());
558 
559   net::TestCompletionCallback cb;
560   scoped_ptr<disk_cache::Backend> extra_cache;
561   int rv = disk_cache::CreateCacheBackend(
562                net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, store.path(), 0,
563                false, base::MessageLoopProxy::current().get(), NULL,
564                &extra_cache, cb.callback());
565   ASSERT_EQ(net::OK, cb.GetResult(rv));
566   ASSERT_TRUE(extra_cache.get() != NULL);
567 
568   ASSERT_TRUE(CleanupCacheDir());
569   SetNewEviction();  // Match the expected behavior for integrity verification.
570   UseCurrentThread();
571 
572   CreateBackend(disk_cache::kNoBuffering, NULL);
573   rv = GeneratePendingIO(&cb);
574 
575   // cache_ has a pending operation, and extra_cache will go away.
576   extra_cache.reset();
577 
578   if (rv == net::ERR_IO_PENDING)
579     EXPECT_FALSE(cb.have_result());
580 
581   base::MessageLoop::current()->RunUntilIdle();
582 
583   // Wait for the actual operation to complete, or we'll keep a file handle that
584   // may cause issues later.
585   rv = cb.GetResult(rv);
586 }
587 #endif
588 
589 // Tests that we deal with background-thread pending operations.
590 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
591   net::TestCompletionCallback cb;
592 
593   {
594     ASSERT_TRUE(CleanupCacheDir());
595     base::Thread cache_thread("CacheThread");
596     ASSERT_TRUE(cache_thread.StartWithOptions(
597         base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
598 
599     uint32 flags = disk_cache::kNoBuffering;
600     if (!fast)
601       flags |= disk_cache::kNoRandom;
602 
603     CreateBackend(flags, &cache_thread);
604 
605     disk_cache::Entry* entry;
606     int rv = cache_->CreateEntry("some key", &entry, cb.callback());
607     ASSERT_EQ(net::OK, cb.GetResult(rv));
608 
609     entry->Close();
610 
611     // The cache destructor will see one pending operation here.
612     cache_.reset();
613   }
614 
615   base::MessageLoop::current()->RunUntilIdle();
616 }
617 
618 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
619   BackendShutdownWithPendingIO(false);
620 }
621 
622 #if !defined(LEAK_SANITIZER)
623 // We'll be leaking from this test.
624 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
625   // The integrity test sets kNoRandom so there's a version mismatch if we don't
626   // force new eviction.
627   SetNewEviction();
628   BackendShutdownWithPendingIO(true);
629 }
630 #endif
631 
632 // Tests that we deal with create-type pending operations.
633 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
634   net::TestCompletionCallback cb;
635 
636   {
637     ASSERT_TRUE(CleanupCacheDir());
638     base::Thread cache_thread("CacheThread");
639     ASSERT_TRUE(cache_thread.StartWithOptions(
640         base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
641 
642     disk_cache::BackendFlags flags =
643       fast ? disk_cache::kNone : disk_cache::kNoRandom;
644     CreateBackend(flags, &cache_thread);
645 
646     disk_cache::Entry* entry;
647     int rv = cache_->CreateEntry("some key", &entry, cb.callback());
648     ASSERT_EQ(net::ERR_IO_PENDING, rv);
649 
650     cache_.reset();
651     EXPECT_FALSE(cb.have_result());
652   }
653 
654   base::MessageLoop::current()->RunUntilIdle();
655 }
656 
657 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
658   BackendShutdownWithPendingCreate(false);
659 }
660 
661 #if !defined(LEAK_SANITIZER)
662 // We'll be leaking an entry from this test.
663 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
664   // The integrity test sets kNoRandom so there's a version mismatch if we don't
665   // force new eviction.
666   SetNewEviction();
667   BackendShutdownWithPendingCreate(true);
668 }
669 #endif
670 
671 // Disabled on Android since this test requires the cache creator to create
672 // blockfile caches.
673 #if !defined(OS_ANDROID)
674 TEST_F(DiskCacheTest, TruncatedIndex) {
675   ASSERT_TRUE(CleanupCacheDir());
676   base::FilePath index = cache_path_.AppendASCII("index");
677   ASSERT_EQ(5, base::WriteFile(index, "hello", 5));
678 
679   base::Thread cache_thread("CacheThread");
680   ASSERT_TRUE(cache_thread.StartWithOptions(
681       base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
682   net::TestCompletionCallback cb;
683 
684   scoped_ptr<disk_cache::Backend> backend;
685   int rv =
686       disk_cache::CreateCacheBackend(net::DISK_CACHE,
687                                      net::CACHE_BACKEND_BLOCKFILE,
688                                      cache_path_,
689                                      0,
690                                      false,
691                                      cache_thread.message_loop_proxy().get(),
692                                      NULL,
693                                      &backend,
694                                      cb.callback());
695   ASSERT_NE(net::OK, cb.GetResult(rv));
696 
697   ASSERT_FALSE(backend);
698 }
699 #endif
700 
701 void DiskCacheBackendTest::BackendSetSize() {
702   const int cache_size = 0x10000;  // 64 kB
703   SetMaxSize(cache_size);
704   InitCache();
705 
706   std::string first("some key");
707   std::string second("something else");
708   disk_cache::Entry* entry;
709   ASSERT_EQ(net::OK, CreateEntry(first, &entry));
710 
711   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
712   memset(buffer->data(), 0, cache_size);
713   EXPECT_EQ(cache_size / 10,
714             WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
715       << "normal file";
716 
717   EXPECT_EQ(net::ERR_FAILED,
718             WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
719       << "file size above the limit";
720 
721   // By doubling the total size, we make this file cacheable.
722   SetMaxSize(cache_size * 2);
723   EXPECT_EQ(cache_size / 5,
724             WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
725 
726   // Let's fill up the cache!
727   SetMaxSize(cache_size * 10);
728   EXPECT_EQ(cache_size * 3 / 4,
729             WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
730   entry->Close();
731   FlushQueueForTest();
732 
733   SetMaxSize(cache_size);
734 
735   // The cache is 95% full.
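  // (Stream 0 now holds 3/4 of cache_size and stream 1 holds 1/5 of it, so about
  // 95% of the just-restored 64 kB limit is already in use.)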
736 
737   ASSERT_EQ(net::OK, CreateEntry(second, &entry));
738   EXPECT_EQ(cache_size / 10,
739             WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
740 
741   disk_cache::Entry* entry2;
742   ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
743   EXPECT_EQ(cache_size / 10,
744             WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
745   entry2->Close();  // This will trigger the cache trim.
746 
747   EXPECT_NE(net::OK, OpenEntry(first, &entry2));
748 
749   FlushQueueForTest();  // Make sure that we are done trimming the cache.
750   FlushQueueForTest();  // We may have posted two tasks to evict stuff.
751 
752   entry->Close();
753   ASSERT_EQ(net::OK, OpenEntry(second, &entry));
754   EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
755   entry->Close();
756 }
757 
758 TEST_F(DiskCacheBackendTest, SetSize) {
759   BackendSetSize();
760 }
761 
762 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
763   SetNewEviction();
764   BackendSetSize();
765 }
766 
767 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
768   SetMemoryOnlyMode();
769   BackendSetSize();
770 }
771 
772 void DiskCacheBackendTest::BackendLoad() {
773   InitCache();
774   int seed = static_cast<int>(Time::Now().ToInternalValue());
775   srand(seed);
776 
777   disk_cache::Entry* entries[100];
778   for (int i = 0; i < 100; i++) {
779     std::string key = GenerateKey(true);
780     ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
781   }
782   EXPECT_EQ(100, cache_->GetEntryCount());
783 
784   for (int i = 0; i < 100; i++) {
785     int source1 = rand() % 100;
786     int source2 = rand() % 100;
787     disk_cache::Entry* temp = entries[source1];
788     entries[source1] = entries[source2];
789     entries[source2] = temp;
790   }
791 
792   for (int i = 0; i < 100; i++) {
793     disk_cache::Entry* entry;
794     ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
795     EXPECT_TRUE(entry == entries[i]);
796     entry->Close();
797     entries[i]->Doom();
798     entries[i]->Close();
799   }
800   FlushQueueForTest();
801   EXPECT_EQ(0, cache_->GetEntryCount());
802 }
803 
804 TEST_F(DiskCacheBackendTest, Load) {
805   // Work with a tiny index table (16 entries)
806   SetMask(0xf);
807   SetMaxSize(0x100000);
808   BackendLoad();
809 }
810 
811 TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
812   SetNewEviction();
813   // Work with a tiny index table (16 entries)
814   SetMask(0xf);
815   SetMaxSize(0x100000);
816   BackendLoad();
817 }
818 
819 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
820   SetMaxSize(0x100000);
821   SetMemoryOnlyMode();
822   BackendLoad();
823 }
824 
825 TEST_F(DiskCacheBackendTest, AppCacheLoad) {
826   SetCacheType(net::APP_CACHE);
827   // Work with a tiny index table (16 entries)
828   SetMask(0xf);
829   SetMaxSize(0x100000);
830   BackendLoad();
831 }
832 
833 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
834   SetCacheType(net::SHADER_CACHE);
835   // Work with a tiny index table (16 entries)
836   SetMask(0xf);
837   SetMaxSize(0x100000);
838   BackendLoad();
839 }
840 
841 // Tests the chaining of an entry to the current head.
842 void DiskCacheBackendTest::BackendChain() {
843   SetMask(0x1);  // 2-entry table.
844   SetMaxSize(0x3000);  // 12 kB.
845   InitCache();
846 
847   disk_cache::Entry* entry;
848   ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
849   entry->Close();
850   ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
851   entry->Close();
852 }
853 
854 TEST_F(DiskCacheBackendTest, Chain) {
855   BackendChain();
856 }
857 
858 TEST_F(DiskCacheBackendTest, NewEvictionChain) {
859   SetNewEviction();
860   BackendChain();
861 }
862 
863 TEST_F(DiskCacheBackendTest, AppCacheChain) {
864   SetCacheType(net::APP_CACHE);
865   BackendChain();
866 }
867 
868 TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
869   SetCacheType(net::SHADER_CACHE);
870   BackendChain();
871 }
872 
873 TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
874   SetNewEviction();
875   InitCache();
876 
877   disk_cache::Entry* entry;
878   for (int i = 0; i < 100; i++) {
879     std::string name(base::StringPrintf("Key %d", i));
880     ASSERT_EQ(net::OK, CreateEntry(name, &entry));
881     entry->Close();
882     if (i < 90) {
883       // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
884       ASSERT_EQ(net::OK, OpenEntry(name, &entry));
885       entry->Close();
886     }
887   }
888 
889   // The first eviction must come from list 1 (10% limit), the second must come
890   // from list 0.
891   TrimForTest(false);
892   EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
893   TrimForTest(false);
894   EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
895 
896   // Double check that we still have the list tails.
897   ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
898   entry->Close();
899   ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
900   entry->Close();
901 }
902 
903 // Before looking for invalid entries, let's check a valid entry.
904 void DiskCacheBackendTest::BackendValidEntry() {
905   InitCache();
906 
907   std::string key("Some key");
908   disk_cache::Entry* entry;
909   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
910 
911   const int kSize = 50;
912   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
913   memset(buffer1->data(), 0, kSize);
914   base::strlcpy(buffer1->data(), "And the data to save", kSize);
915   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
916   entry->Close();
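  // The entry was fully written and closed, so it is expected to survive the
  // simulated crash below and reopen cleanly.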
917   SimulateCrash();
918 
919   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
920 
921   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
922   memset(buffer2->data(), 0, kSize);
923   EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
924   entry->Close();
925   EXPECT_STREQ(buffer1->data(), buffer2->data());
926 }
927 
928 TEST_F(DiskCacheBackendTest, ValidEntry) {
929   BackendValidEntry();
930 }
931 
932 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
933   SetNewEviction();
934   BackendValidEntry();
935 }
936 
937 // The same logic of the previous test (ValidEntry), but this time force the
938 // entry to be invalid, simulating a crash in the middle.
939 // We'll be leaking memory from this test.
940 void DiskCacheBackendTest::BackendInvalidEntry() {
941   InitCache();
942 
943   std::string key("Some key");
944   disk_cache::Entry* entry;
945   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
946 
947   const int kSize = 50;
948   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
949   memset(buffer->data(), 0, kSize);
950   base::strlcpy(buffer->data(), "And the data to save", kSize);
951   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
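  // Unlike BackendValidEntry, the entry is left open across the simulated
  // crash, so it should be considered dirty and discarded on the next init.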
952   SimulateCrash();
953 
954   EXPECT_NE(net::OK, OpenEntry(key, &entry));
955   EXPECT_EQ(0, cache_->GetEntryCount());
956 }
957 
958 #if !defined(LEAK_SANITIZER)
959 // We'll be leaking memory from this test.
960 TEST_F(DiskCacheBackendTest, InvalidEntry) {
961   BackendInvalidEntry();
962 }
963 
964 // We'll be leaking memory from this test.
965 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
966   SetNewEviction();
967   BackendInvalidEntry();
968 }
969 
970 // We'll be leaking memory from this test.
971 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
972   SetCacheType(net::APP_CACHE);
973   BackendInvalidEntry();
974 }
975 
976 // We'll be leaking memory from this test.
977 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
978   SetCacheType(net::SHADER_CACHE);
979   BackendInvalidEntry();
980 }
981 
982 // Almost the same test, but this time crash the cache after reading an entry.
983 // We'll be leaking memory from this test.
984 void DiskCacheBackendTest::BackendInvalidEntryRead() {
985   InitCache();
986 
987   std::string key("Some key");
988   disk_cache::Entry* entry;
989   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
990 
991   const int kSize = 50;
992   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
993   memset(buffer->data(), 0, kSize);
994   base::strlcpy(buffer->data(), "And the data to save", kSize);
995   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
996   entry->Close();
997   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
998   EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
999 
1000   SimulateCrash();
1001 
1002   if (type_ == net::APP_CACHE) {
1003     // Reading an entry and crashing should not make it dirty.
1004     ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1005     EXPECT_EQ(1, cache_->GetEntryCount());
1006     entry->Close();
1007   } else {
1008     EXPECT_NE(net::OK, OpenEntry(key, &entry));
1009     EXPECT_EQ(0, cache_->GetEntryCount());
1010   }
1011 }
1012 
1013 // We'll be leaking memory from this test.
1014 TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
1015   BackendInvalidEntryRead();
1016 }
1017 
1018 // We'll be leaking memory from this test.
1019 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
1020   SetNewEviction();
1021   BackendInvalidEntryRead();
1022 }
1023 
1024 // We'll be leaking memory from this test.
1025 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
1026   SetCacheType(net::APP_CACHE);
1027   BackendInvalidEntryRead();
1028 }
1029 
1030 // We'll be leaking memory from this test.
1031 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
1032   SetCacheType(net::SHADER_CACHE);
1033   BackendInvalidEntryRead();
1034 }
1035 
1036 // We'll be leaking memory from this test.
1037 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1038   // Work with a tiny index table (16 entries)
1039   SetMask(0xf);
1040   SetMaxSize(0x100000);
1041   InitCache();
1042 
1043   int seed = static_cast<int>(Time::Now().ToInternalValue());
1044   srand(seed);
1045 
1046   const int kNumEntries = 100;
1047   disk_cache::Entry* entries[kNumEntries];
1048   for (int i = 0; i < kNumEntries; i++) {
1049     std::string key = GenerateKey(true);
1050     ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
1051   }
1052   EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1053 
1054   for (int i = 0; i < kNumEntries; i++) {
1055     int source1 = rand() % kNumEntries;
1056     int source2 = rand() % kNumEntries;
1057     disk_cache::Entry* temp = entries[source1];
1058     entries[source1] = entries[source2];
1059     entries[source2] = temp;
1060   }
1061 
1062   std::string keys[kNumEntries];
1063   for (int i = 0; i < kNumEntries; i++) {
1064     keys[i] = entries[i]->GetKey();
1065     if (i < kNumEntries / 2)
1066       entries[i]->Close();
1067   }
1068 
1069   SimulateCrash();
1070 
1071   for (int i = kNumEntries / 2; i < kNumEntries; i++) {
1072     disk_cache::Entry* entry;
1073     EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
1074   }
1075 
1076   for (int i = 0; i < kNumEntries / 2; i++) {
1077     disk_cache::Entry* entry;
1078     ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
1079     entry->Close();
1080   }
1081 
1082   EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
1083 }
1084 
1085 // We'll be leaking memory from this test.
1086 TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
1087   BackendInvalidEntryWithLoad();
1088 }
1089 
1090 // We'll be leaking memory from this test.
1091 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
1092   SetNewEviction();
1093   BackendInvalidEntryWithLoad();
1094 }
1095 
1096 // We'll be leaking memory from this test.
1097 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
1098   SetCacheType(net::APP_CACHE);
1099   BackendInvalidEntryWithLoad();
1100 }
1101 
1102 // We'll be leaking memory from this test.
1103 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
1104   SetCacheType(net::SHADER_CACHE);
1105   BackendInvalidEntryWithLoad();
1106 }
1107 
1108 // We'll be leaking memory from this test.
1109 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1110   const int kSize = 0x3000;  // 12 kB
1111   SetMaxSize(kSize * 10);
1112   InitCache();
1113 
1114   std::string first("some key");
1115   std::string second("something else");
1116   disk_cache::Entry* entry;
1117   ASSERT_EQ(net::OK, CreateEntry(first, &entry));
1118 
1119   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1120   memset(buffer->data(), 0, kSize);
1121   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1122 
1123   // Simulate a crash.
1124   SimulateCrash();
1125 
1126   ASSERT_EQ(net::OK, CreateEntry(second, &entry));
1127   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1128 
1129   EXPECT_EQ(2, cache_->GetEntryCount());
1130   SetMaxSize(kSize);
1131   entry->Close();  // Trim the cache.
1132   FlushQueueForTest();
1133 
1134   // If we evicted the entry in less than 20 ms, we have one entry in the cache;
1135   // if it took more than that, we posted a task and we'll delete the second
1136   // entry too.
1137   base::MessageLoop::current()->RunUntilIdle();
1138 
1139   // This may not be thread-safe in general, but for now it's OK, so add some
1140   // ThreadSanitizer annotations to ignore data races on cache_.
1141   // See http://crbug.com/55970
1142   ANNOTATE_IGNORE_READS_BEGIN();
1143   EXPECT_GE(1, cache_->GetEntryCount());
1144   ANNOTATE_IGNORE_READS_END();
1145 
1146   EXPECT_NE(net::OK, OpenEntry(first, &entry));
1147 }
1148 
1149 // We'll be leaking memory from this test.
1150 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
1151   BackendTrimInvalidEntry();
1152 }
1153 
1154 // We'll be leaking memory from this test.
1155 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
1156   SetNewEviction();
1157   BackendTrimInvalidEntry();
1158 }
1159 
1160 // We'll be leaking memory from this test.
1161 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1162   SetMask(0xf);  // 16-entry table.
1163 
1164   const int kSize = 0x3000;  // 12 kB
1165   SetMaxSize(kSize * 40);
1166   InitCache();
1167 
1168   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1169   memset(buffer->data(), 0, kSize);
1170   disk_cache::Entry* entry;
1171 
1172   // Writing 32 entries to this cache chains most of them.
1173   for (int i = 0; i < 32; i++) {
1174     std::string key(base::StringPrintf("some key %d", i));
1175     ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1176     EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1177     entry->Close();
1178     ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1179     // Note that we are not closing the entries.
1180   }
1181 
1182   // Simulate a crash.
1183   SimulateCrash();
1184 
1185   ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1186   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1187 
1188   FlushQueueForTest();
1189   EXPECT_EQ(33, cache_->GetEntryCount());
1190   SetMaxSize(kSize);
1191 
1192   // For the new eviction code, all corrupt entries are on the second list so
1193   // they are not going away that easy.
1194   if (new_eviction_) {
1195     EXPECT_EQ(net::OK, DoomAllEntries());
1196   }
1197 
1198   entry->Close();  // Trim the cache.
1199   FlushQueueForTest();
1200 
1201   // We may abort the eviction before cleaning up everything.
1202   base::MessageLoop::current()->RunUntilIdle();
1203   FlushQueueForTest();
1204   // If it's not clear enough: we may still have eviction tasks running at this
1205   // time, so the number of entries is changing while we read it.
1206   ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1207   EXPECT_GE(30, cache_->GetEntryCount());
1208   ANNOTATE_IGNORE_READS_AND_WRITES_END();
1209 }
1210 
1211 // We'll be leaking memory from this test.
1212 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
1213   BackendTrimInvalidEntry2();
1214 }
1215 
1216 // We'll be leaking memory from this test.
1217 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
1218   SetNewEviction();
1219   BackendTrimInvalidEntry2();
1220 }
1221 #endif  // !defined(LEAK_SANITIZER)
1222 
1223 void DiskCacheBackendTest::BackendEnumerations() {
1224   InitCache();
1225   Time initial = Time::Now();
1226 
1227   const int kNumEntries = 100;
1228   for (int i = 0; i < kNumEntries; i++) {
1229     std::string key = GenerateKey(true);
1230     disk_cache::Entry* entry;
1231     ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1232     entry->Close();
1233   }
1234   EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1235   Time final = Time::Now();
1236 
1237   disk_cache::Entry* entry;
1238   void* iter = NULL;
1239   int count = 0;
1240   Time last_modified[kNumEntries];
1241   Time last_used[kNumEntries];
1242   while (OpenNextEntry(&iter, &entry) == net::OK) {
1243     ASSERT_TRUE(NULL != entry);
1244     if (count < kNumEntries) {
1245       last_modified[count] = entry->GetLastModified();
1246       last_used[count] = entry->GetLastUsed();
1247       EXPECT_TRUE(initial <= last_modified[count]);
1248       EXPECT_TRUE(final >= last_modified[count]);
1249     }
1250 
1251     entry->Close();
1252     count++;
1253   };
1254   EXPECT_EQ(kNumEntries, count);
1255 
1256   iter = NULL;
1257   count = 0;
1258   // The previous enumeration should not have changed the timestamps.
1259   while (OpenNextEntry(&iter, &entry) == net::OK) {
1260     ASSERT_TRUE(NULL != entry);
1261     if (count < kNumEntries) {
1262       EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1263       EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1264     }
1265     entry->Close();
1266     count++;
1267   };
1268   EXPECT_EQ(kNumEntries, count);
1269 }
1270 
1271 TEST_F(DiskCacheBackendTest, Enumerations) {
1272   BackendEnumerations();
1273 }
1274 
1275 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
1276   SetNewEviction();
1277   BackendEnumerations();
1278 }
1279 
1280 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
1281   SetMemoryOnlyMode();
1282   BackendEnumerations();
1283 }
1284 
1285 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
1286   SetCacheType(net::SHADER_CACHE);
1287   BackendEnumerations();
1288 }
1289 
1290 TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
1291   SetCacheType(net::APP_CACHE);
1292   BackendEnumerations();
1293 }
1294 
1295 // Verifies enumerations while entries are open.
1296 void DiskCacheBackendTest::BackendEnumerations2() {
1297   InitCache();
1298   const std::string first("first");
1299   const std::string second("second");
1300   disk_cache::Entry *entry1, *entry2;
1301   ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1302   entry1->Close();
1303   ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1304   entry2->Close();
1305   FlushQueueForTest();
1306 
1307   // Make sure that the timestamp is not the same.
1308   AddDelay();
1309   ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
1310   void* iter = NULL;
1311   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1312   EXPECT_EQ(entry2->GetKey(), second);
1313 
1314   // Two entries and the iterator pointing at "first".
1315   entry1->Close();
1316   entry2->Close();
1317 
1318   // The iterator should still be valid, so we should not crash.
1319   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1320   EXPECT_EQ(entry2->GetKey(), first);
1321   entry2->Close();
1322   cache_->EndEnumeration(&iter);
1323 
1324   // Modify the oldest entry and get the newest element.
1325   ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
1326   EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
1327   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1328   if (type_ == net::APP_CACHE) {
1329     // The list is not updated.
1330     EXPECT_EQ(entry2->GetKey(), second);
1331   } else {
1332     EXPECT_EQ(entry2->GetKey(), first);
1333   }
1334 
1335   entry1->Close();
1336   entry2->Close();
1337   cache_->EndEnumeration(&iter);
1338 }
1339 
1340 TEST_F(DiskCacheBackendTest, Enumerations2) {
1341   BackendEnumerations2();
1342 }
1343 
1344 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
1345   SetNewEviction();
1346   BackendEnumerations2();
1347 }
1348 
1349 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
1350   SetMemoryOnlyMode();
1351   BackendEnumerations2();
1352 }
1353 
1354 TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
1355   SetCacheType(net::APP_CACHE);
1356   BackendEnumerations2();
1357 }
1358 
1359 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
1360   SetCacheType(net::SHADER_CACHE);
1361   BackendEnumerations2();
1362 }
1363 
1364 // Verify that ReadData calls do not update the LRU cache
1365 // when using the SHADER_CACHE type.
1366 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
1367   SetCacheType(net::SHADER_CACHE);
1368   InitCache();
1369   const std::string first("first");
1370   const std::string second("second");
1371   disk_cache::Entry *entry1, *entry2;
1372   const int kSize = 50;
1373   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1374 
1375   ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1376   memset(buffer1->data(), 0, kSize);
1377   base::strlcpy(buffer1->data(), "And the data to save", kSize);
1378   EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1379 
1380   ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1381   entry2->Close();
1382 
1383   FlushQueueForTest();
1384 
1385   // Make sure that the timestamp is not the same.
1386   AddDelay();
1387 
1388   // Read from the last item in the LRU.
1389   EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1390   entry1->Close();
1391 
1392   void* iter = NULL;
1393   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1394   EXPECT_EQ(entry2->GetKey(), second);
1395   entry2->Close();
1396   cache_->EndEnumeration(&iter);
1397 }
1398 
1399 #if !defined(LEAK_SANITIZER)
1400 // Verify handling of invalid entries while doing enumerations.
1401 // We'll be leaking memory from this test.
1402 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1403   InitCache();
1404 
1405   std::string key("Some key");
1406   disk_cache::Entry *entry, *entry1, *entry2;
1407   ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1408 
1409   const int kSize = 50;
1410   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1411   memset(buffer1->data(), 0, kSize);
1412   base::strlcpy(buffer1->data(), "And the data to save", kSize);
1413   EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1414   entry1->Close();
1415   ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1416   EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1417 
1418   std::string key2("Another key");
1419   ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1420   entry2->Close();
1421   ASSERT_EQ(2, cache_->GetEntryCount());
1422 
1423   SimulateCrash();
1424 
1425   void* iter = NULL;
1426   int count = 0;
1427   while (OpenNextEntry(&iter, &entry) == net::OK) {
1428     ASSERT_TRUE(NULL != entry);
1429     EXPECT_EQ(key2, entry->GetKey());
1430     entry->Close();
1431     count++;
1432   };
1433   EXPECT_EQ(1, count);
1434   EXPECT_EQ(1, cache_->GetEntryCount());
1435 }
1436 
1437 // We'll be leaking memory from this test.
1438 TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
1439   BackendInvalidEntryEnumeration();
1440 }
1441 
1442 // We'll be leaking memory from this test.
1443 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
1444   SetNewEviction();
1445   BackendInvalidEntryEnumeration();
1446 }
1447 #endif  // !defined(LEAK_SANITIZER)
1448 
1449 // Tests that if for some reason entries are modified close to existing cache
1450 // iterators, we don't generate fatal errors or reset the cache.
1451 void DiskCacheBackendTest::BackendFixEnumerators() {
1452   InitCache();
1453 
1454   int seed = static_cast<int>(Time::Now().ToInternalValue());
1455   srand(seed);
1456 
1457   const int kNumEntries = 10;
1458   for (int i = 0; i < kNumEntries; i++) {
1459     std::string key = GenerateKey(true);
1460     disk_cache::Entry* entry;
1461     ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1462     entry->Close();
1463   }
1464   EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1465 
1466   disk_cache::Entry *entry1, *entry2;
1467   void* iter1 = NULL;
1468   void* iter2 = NULL;
1469   ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
1470   ASSERT_TRUE(NULL != entry1);
1471   entry1->Close();
1472   entry1 = NULL;
1473 
1474   // Let's go to the middle of the list.
1475   for (int i = 0; i < kNumEntries / 2; i++) {
1476     if (entry1)
1477       entry1->Close();
1478     ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
1479     ASSERT_TRUE(NULL != entry1);
1480 
1481     ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1482     ASSERT_TRUE(NULL != entry2);
1483     entry2->Close();
1484   }
1485 
1486   // Messing with entry1 will modify entry2->next.
1487   entry1->Doom();
1488   ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1489   ASSERT_TRUE(NULL != entry2);
1490 
1491   // The link entry2->entry1 should be broken.
1492   EXPECT_NE(entry2->GetKey(), entry1->GetKey());
1493   entry1->Close();
1494   entry2->Close();
1495 
1496   // And the second iterator should keep working.
1497   ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1498   ASSERT_TRUE(NULL != entry2);
1499   entry2->Close();
1500 
1501   cache_->EndEnumeration(&iter1);
1502   cache_->EndEnumeration(&iter2);
1503 }
1504 
1505 TEST_F(DiskCacheBackendTest, FixEnumerators) {
1506   BackendFixEnumerators();
1507 }
1508 
1509 TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
1510   SetNewEviction();
1511   BackendFixEnumerators();
1512 }
1513 
1514 void DiskCacheBackendTest::BackendDoomRecent() {
1515   InitCache();
1516 
1517   disk_cache::Entry *entry;
1518   ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1519   entry->Close();
1520   ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1521   entry->Close();
1522   FlushQueueForTest();
1523 
1524   AddDelay();
1525   Time middle = Time::Now();
1526 
1527   ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1528   entry->Close();
1529   ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1530   entry->Close();
1531   FlushQueueForTest();
1532 
1533   AddDelay();
1534   Time final = Time::Now();
1535 
1536   ASSERT_EQ(4, cache_->GetEntryCount());
1537   EXPECT_EQ(net::OK, DoomEntriesSince(final));
1538   ASSERT_EQ(4, cache_->GetEntryCount());
1539 
1540   EXPECT_EQ(net::OK, DoomEntriesSince(middle));
1541   ASSERT_EQ(2, cache_->GetEntryCount());
1542 
1543   ASSERT_EQ(net::OK, OpenEntry("second", &entry));
1544   entry->Close();
1545 }
1546 
1547 TEST_F(DiskCacheBackendTest, DoomRecent) {
1548   BackendDoomRecent();
1549 }
1550 
1551 TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
1552   SetNewEviction();
1553   BackendDoomRecent();
1554 }
1555 
1556 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
1557   SetMemoryOnlyMode();
1558   BackendDoomRecent();
1559 }
1560 
1561 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
1562   SetMemoryOnlyMode();
1563   base::Time start;
1564   InitSparseCache(&start, NULL);
1565   DoomEntriesSince(start);
1566   EXPECT_EQ(1, cache_->GetEntryCount());
1567 }
1568 
1569 TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
1570   base::Time start;
1571   InitSparseCache(&start, NULL);
1572   DoomEntriesSince(start);
1573   // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
1574   // MemBackendImpl does not. That's why the expected value differs here from
1575   // MemoryOnlyDoomEntriesSinceSparse.
1576   EXPECT_EQ(3, cache_->GetEntryCount());
1577 }
1578 
1579 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
1580   SetMemoryOnlyMode();
1581   InitSparseCache(NULL, NULL);
1582   EXPECT_EQ(net::OK, DoomAllEntries());
1583   EXPECT_EQ(0, cache_->GetEntryCount());
1584 }
1585 
1586 TEST_F(DiskCacheBackendTest, DoomAllSparse) {
1587   InitSparseCache(NULL, NULL);
1588   EXPECT_EQ(net::OK, DoomAllEntries());
1589   EXPECT_EQ(0, cache_->GetEntryCount());
1590 }
1591 
1592 void DiskCacheBackendTest::BackendDoomBetween() {
1593   InitCache();
1594 
1595   disk_cache::Entry *entry;
1596   ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1597   entry->Close();
1598   FlushQueueForTest();
1599 
1600   AddDelay();
1601   Time middle_start = Time::Now();
1602 
1603   ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1604   entry->Close();
1605   ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1606   entry->Close();
1607   FlushQueueForTest();
1608 
1609   AddDelay();
1610   Time middle_end = Time::Now();
1611 
1612   ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1613   entry->Close();
1614   ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1615   entry->Close();
1616   FlushQueueForTest();
1617 
1618   AddDelay();
1619   Time final = Time::Now();
1620 
1621   ASSERT_EQ(4, cache_->GetEntryCount());
1622   EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
1623   ASSERT_EQ(2, cache_->GetEntryCount());
1624 
1625   ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1626   entry->Close();
1627 
1628   EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
1629   ASSERT_EQ(1, cache_->GetEntryCount());
1630 
1631   ASSERT_EQ(net::OK, OpenEntry("first", &entry));
1632   entry->Close();
1633 }
1634 
1635 TEST_F(DiskCacheBackendTest, DoomBetween) {
1636   BackendDoomBetween();
1637 }
1638 
1639 TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
1640   SetNewEviction();
1641   BackendDoomBetween();
1642 }
1643 
1644 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
1645   SetMemoryOnlyMode();
1646   BackendDoomBetween();
1647 }
1648 
1649 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
1650   SetMemoryOnlyMode();
1651   base::Time start, end;
1652   InitSparseCache(&start, &end);
1653   DoomEntriesBetween(start, end);
1654   EXPECT_EQ(3, cache_->GetEntryCount());
1655 
1656   start = end;
1657   end = base::Time::Now();
1658   DoomEntriesBetween(start, end);
1659   EXPECT_EQ(1, cache_->GetEntryCount());
1660 }
1661 
1662 TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
1663   base::Time start, end;
1664   InitSparseCache(&start, &end);
1665   DoomEntriesBetween(start, end);
1666   EXPECT_EQ(9, cache_->GetEntryCount());
1667 
1668   start = end;
1669   end = base::Time::Now();
1670   DoomEntriesBetween(start, end);
1671   EXPECT_EQ(3, cache_->GetEntryCount());
1672 }
1673 
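// Replays a cache image captured in the middle of a transaction on "the first
// key". After InitCache() that entry must be unrecoverable: opening it fails
// and the entry count drops back to |num_entries| (or |num_entries| - 1 under
// heavy load, when a colliding entry also ends up dirty).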
1674 void DiskCacheBackendTest::BackendTransaction(const std::string& name,
1675                                               int num_entries, bool load) {
1676   success_ = false;
1677   ASSERT_TRUE(CopyTestCache(name));
1678   DisableFirstCleanup();
1679 
1680   uint32 mask;
1681   if (load) {
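    // A 0xf mask gives a 16-entry index table, so the 100-entry "load" images
    // below exercise heavy hash collisions.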
1682     mask = 0xf;
1683     SetMaxSize(0x100000);
1684   } else {
1685     // Clear the settings from the previous run.
1686     mask = 0;
1687     SetMaxSize(0);
1688   }
1689   SetMask(mask);
1690 
1691   InitCache();
1692   ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
1693 
1694   std::string key("the first key");
1695   disk_cache::Entry* entry1;
1696   ASSERT_NE(net::OK, OpenEntry(key, &entry1));
1697 
1698   int actual = cache_->GetEntryCount();
1699   if (num_entries != actual) {
1700     ASSERT_TRUE(load);
1701     // If there is a heavy load, inserting an entry will make another entry
1702     // dirty (on the hash bucket) so two entries are removed.
1703     ASSERT_EQ(num_entries - 1, actual);
1704   }
1705 
1706   cache_.reset();
1707   cache_impl_ = NULL;
1708 
1709   ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
1710   success_ = true;
1711 }
1712 
1713 void DiskCacheBackendTest::BackendRecoverInsert() {
1714   // Tests with an empty cache.
1715   BackendTransaction("insert_empty1", 0, false);
1716   ASSERT_TRUE(success_) << "insert_empty1";
1717   BackendTransaction("insert_empty2", 0, false);
1718   ASSERT_TRUE(success_) << "insert_empty2";
1719   BackendTransaction("insert_empty3", 0, false);
1720   ASSERT_TRUE(success_) << "insert_empty3";
1721 
1722   // Tests with one entry on the cache.
1723   BackendTransaction("insert_one1", 1, false);
1724   ASSERT_TRUE(success_) << "insert_one1";
1725   BackendTransaction("insert_one2", 1, false);
1726   ASSERT_TRUE(success_) << "insert_one2";
1727   BackendTransaction("insert_one3", 1, false);
1728   ASSERT_TRUE(success_) << "insert_one3";
1729 
1730   // Tests with one hundred entries on the cache, tiny index.
1731   BackendTransaction("insert_load1", 100, true);
1732   ASSERT_TRUE(success_) << "insert_load1";
1733   BackendTransaction("insert_load2", 100, true);
1734   ASSERT_TRUE(success_) << "insert_load2";
1735 }
1736 
1737 TEST_F(DiskCacheBackendTest, RecoverInsert) {
1738   BackendRecoverInsert();
1739 }
1740 
1741 TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
1742   SetNewEviction();
1743   BackendRecoverInsert();
1744 }
1745 
1746 void DiskCacheBackendTest::BackendRecoverRemove() {
1747   // Removing the only element.
1748   BackendTransaction("remove_one1", 0, false);
1749   ASSERT_TRUE(success_) << "remove_one1";
1750   BackendTransaction("remove_one2", 0, false);
1751   ASSERT_TRUE(success_) << "remove_one2";
1752   BackendTransaction("remove_one3", 0, false);
1753   ASSERT_TRUE(success_) << "remove_one3";
1754 
1755   // Removing the head.
1756   BackendTransaction("remove_head1", 1, false);
1757   ASSERT_TRUE(success_) << "remove_head1";
1758   BackendTransaction("remove_head2", 1, false);
1759   ASSERT_TRUE(success_) << "remove_head2";
1760   BackendTransaction("remove_head3", 1, false);
1761   ASSERT_TRUE(success_) << "remove_head3";
1762 
1763   // Removing the tail.
1764   BackendTransaction("remove_tail1", 1, false);
1765   ASSERT_TRUE(success_) << "remove_tail1";
1766   BackendTransaction("remove_tail2", 1, false);
1767   ASSERT_TRUE(success_) << "remove_tail2";
1768   BackendTransaction("remove_tail3", 1, false);
1769   ASSERT_TRUE(success_) << "remove_tail3";
1770 
1771   // Removing with one hundred entries on the cache, tiny index.
1772   BackendTransaction("remove_load1", 100, true);
1773   ASSERT_TRUE(success_) << "remove_load1";
1774   BackendTransaction("remove_load2", 100, true);
1775   ASSERT_TRUE(success_) << "remove_load2";
1776   BackendTransaction("remove_load3", 100, true);
1777   ASSERT_TRUE(success_) << "remove_load3";
1778 
1779   // This case cannot be reverted.
1780   BackendTransaction("remove_one4", 0, false);
1781   ASSERT_TRUE(success_) << "remove_one4";
1782   BackendTransaction("remove_head4", 1, false);
1783   ASSERT_TRUE(success_) << "remove_head4";
1784 }
1785 
1786 TEST_F(DiskCacheBackendTest, RecoverRemove) {
1787   BackendRecoverRemove();
1788 }
1789 
1790 TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
1791   SetNewEviction();
1792   BackendRecoverRemove();
1793 }
1794 
1795 void DiskCacheBackendTest::BackendRecoverWithEviction() {
1796   success_ = false;
1797   ASSERT_TRUE(CopyTestCache("insert_load1"));
1798   DisableFirstCleanup();
1799 
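  // A 16-entry table and a 4 kB limit force eviction to run while the copied
  // cache image is still being recovered.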
1800   SetMask(0xf);
1801   SetMaxSize(0x1000);
1802 
1803   // We should not crash here.
1804   InitCache();
1805   DisableIntegrityCheck();
1806 }
1807 
1808 TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
1809   BackendRecoverWithEviction();
1810 }
1811 
1812 TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
1813   SetNewEviction();
1814   BackendRecoverWithEviction();
1815 }
1816 
1817 // Tests that the |BackendImpl| fails to start with the wrong cache version.
1818 TEST_F(DiskCacheTest, WrongVersion) {
1819   ASSERT_TRUE(CopyTestCache("wrong_version"));
1820   base::Thread cache_thread("CacheThread");
1821   ASSERT_TRUE(cache_thread.StartWithOptions(
1822       base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1823   net::TestCompletionCallback cb;
1824 
1825   scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1826       cache_path_, cache_thread.message_loop_proxy().get(), NULL));
1827   int rv = cache->Init(cb.callback());
1828   ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
1829 }
1830 
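// Entropy provider that always reports 0.5; it exists only so a FieldTrialList
// can be set up for the SimpleCacheTrial tests below.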
1831 class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
1832  public:
1833   virtual ~BadEntropyProvider() {}
1834 
1835   virtual double GetEntropyForTrial(const std::string& trial_name,
1836                                     uint32 randomization_seed) const OVERRIDE {
1837     return 0.5;
1838   }
1839 };
1840 
1841 // Tests that the disk cache successfully joins the control group, dropping the
1842 // existing cache in favour of a new empty cache.
1843 // Disabled on Android since this test requires the cache creator to create
1844 // blockfile caches.
1845 #if !defined(OS_ANDROID)
1846 TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
1847   base::Thread cache_thread("CacheThread");
1848   ASSERT_TRUE(cache_thread.StartWithOptions(
1849                   base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1850 
1851   scoped_ptr<disk_cache::BackendImpl> cache =
1852       CreateExistingEntryCache(cache_thread, cache_path_);
1853   ASSERT_TRUE(cache.get());
1854   cache.reset();
1855 
1856   // Instantiate the SimpleCacheTrial, forcing this run into the
1857   // ExperimentControl group.
1858   base::FieldTrialList field_trial_list(new BadEntropyProvider());
1859   base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1860                                          "ExperimentControl");
1861   net::TestCompletionCallback cb;
1862   scoped_ptr<disk_cache::Backend> base_cache;
1863   int rv =
1864       disk_cache::CreateCacheBackend(net::DISK_CACHE,
1865                                      net::CACHE_BACKEND_BLOCKFILE,
1866                                      cache_path_,
1867                                      0,
1868                                      true,
1869                                      cache_thread.message_loop_proxy().get(),
1870                                      NULL,
1871                                      &base_cache,
1872                                      cb.callback());
1873   ASSERT_EQ(net::OK, cb.GetResult(rv));
1874   EXPECT_EQ(0, base_cache->GetEntryCount());
1875 }
1876 #endif
1877 
1878 // Tests that the disk cache can restart in the control group preserving
1879 // existing entries.
1880 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1881   // Instantiate the SimpleCacheTrial, forcing this run into the
1882   // ExperimentControl group.
1883   base::FieldTrialList field_trial_list(new BadEntropyProvider());
1884   base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1885                                          "ExperimentControl");
1886 
1887   base::Thread cache_thread("CacheThread");
1888   ASSERT_TRUE(cache_thread.StartWithOptions(
1889                   base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1890 
1891   scoped_ptr<disk_cache::BackendImpl> cache =
1892       CreateExistingEntryCache(cache_thread, cache_path_);
1893   ASSERT_TRUE(cache.get());
1894 
1895   net::TestCompletionCallback cb;
1896 
1897   const int kRestartCount = 5;
1898   for (int i = 0; i < kRestartCount; ++i) {
1899     cache.reset(new disk_cache::BackendImpl(
1900         cache_path_, cache_thread.message_loop_proxy(), NULL));
1901     int rv = cache->Init(cb.callback());
1902     ASSERT_EQ(net::OK, cb.GetResult(rv));
1903     EXPECT_EQ(1, cache->GetEntryCount());
1904 
1905     disk_cache::Entry* entry = NULL;
1906     rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1907     EXPECT_EQ(net::OK, cb.GetResult(rv));
1908     EXPECT_TRUE(entry);
1909     entry->Close();
1910   }
1911 }
1912 
1913 // Tests that the disk cache can leave the control group preserving existing
1914 // entries.
1915 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1916   base::Thread cache_thread("CacheThread");
1917   ASSERT_TRUE(cache_thread.StartWithOptions(
1918       base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1919 
1920   {
1921     // Instantiate the SimpleCacheTrial, forcing this run into the
1922     // ExperimentControl group.
1923     base::FieldTrialList field_trial_list(new BadEntropyProvider());
1924     base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1925                                            "ExperimentControl");
1926 
1927     scoped_ptr<disk_cache::BackendImpl> cache =
1928         CreateExistingEntryCache(cache_thread, cache_path_);
1929     ASSERT_TRUE(cache.get());
1930   }
1931 
1932   // Instantiate the SimpleCacheTrial, forcing this run into the
1933   // ExperimentNo group.
1934   base::FieldTrialList field_trial_list(new BadEntropyProvider());
1935   base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1936   net::TestCompletionCallback cb;
1937 
1938   const int kRestartCount = 5;
1939   for (int i = 0; i < kRestartCount; ++i) {
1940     scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1941         cache_path_, cache_thread.message_loop_proxy(), NULL));
1942     int rv = cache->Init(cb.callback());
1943     ASSERT_EQ(net::OK, cb.GetResult(rv));
1944     EXPECT_EQ(1, cache->GetEntryCount());
1945 
1946     disk_cache::Entry* entry = NULL;
1947     rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1948     EXPECT_EQ(net::OK, cb.GetResult(rv));
1949     EXPECT_TRUE(entry);
1950     entry->Close();
1951   }
1952 }
1953 
1954 // Tests that the cache is properly restarted on recovery error.
1955 // Disabled on Android since this test requires the cache creator to create
1956 // blockfile caches.
1957 #if !defined(OS_ANDROID)
1958 TEST_F(DiskCacheBackendTest, DeleteOld) {
1959   ASSERT_TRUE(CopyTestCache("wrong_version"));
1960   SetNewEviction();
1961   base::Thread cache_thread("CacheThread");
1962   ASSERT_TRUE(cache_thread.StartWithOptions(
1963       base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1964 
1965   net::TestCompletionCallback cb;
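  // Disallow IO on this thread; the version-mismatch recovery below is then
  // expected to do its file work on the cache thread rather than blocking here.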
1966   bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1967   base::FilePath path(cache_path_);
1968   int rv =
1969       disk_cache::CreateCacheBackend(net::DISK_CACHE,
1970                                      net::CACHE_BACKEND_BLOCKFILE,
1971                                      path,
1972                                      0,
1973                                      true,
1974                                      cache_thread.message_loop_proxy().get(),
1975                                      NULL,
1976                                      &cache_,
1977                                      cb.callback());
1978   path.clear();  // Make sure path was captured by the previous call.
1979   ASSERT_EQ(net::OK, cb.GetResult(rv));
1980   base::ThreadRestrictions::SetIOAllowed(prev);
1981   cache_.reset();
1982   EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1983 }
1984 #endif
1985 
1986 // We want to be able to deal with messed up entries on disk.
1987 void DiskCacheBackendTest::BackendInvalidEntry2() {
1988   ASSERT_TRUE(CopyTestCache("bad_entry"));
1989   DisableFirstCleanup();
1990   InitCache();
1991 
1992   disk_cache::Entry *entry1, *entry2;
1993   ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
1994   EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
1995   entry1->Close();
1996 
1997   // CheckCacheIntegrity will fail at this point.
1998   DisableIntegrityCheck();
1999 }
2000 
2001 TEST_F(DiskCacheBackendTest, InvalidEntry2) {
2002   BackendInvalidEntry2();
2003 }
2004 
2005 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
2006   SetNewEviction();
2007   BackendInvalidEntry2();
2008 }
2009 
2010 // Tests that we don't crash or hang when enumerating this cache.
2011 void DiskCacheBackendTest::BackendInvalidEntry3() {
2012   SetMask(0x1);  // 2-entry table.
2013   SetMaxSize(0x3000);  // 12 kB.
2014   DisableFirstCleanup();
2015   InitCache();
2016 
2017   disk_cache::Entry* entry;
2018   void* iter = NULL;
2019   while (OpenNextEntry(&iter, &entry) == net::OK) {
2020     entry->Close();
2021   }
2022 }
2023 
2024 TEST_F(DiskCacheBackendTest, InvalidEntry3) {
2025   ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2026   BackendInvalidEntry3();
2027 }
2028 
2029 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
2030   ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2031   SetNewEviction();
2032   BackendInvalidEntry3();
2033   DisableIntegrityCheck();
2034 }
2035 
2036 // Test that we handle a dirty entry on the LRU list, already replaced with
2037 // the same key, and with hash collisions.
2038 TEST_F(DiskCacheBackendTest, InvalidEntry4) {
2039   ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2040   SetMask(0x1);  // 2-entry table.
2041   SetMaxSize(0x3000);  // 12 kB.
2042   DisableFirstCleanup();
2043   InitCache();
2044 
2045   TrimForTest(false);
2046 }
2047 
2048 // Test that we handle a dirty entry on the deleted list, already replaced with
2049 // the same key, and with hash collisions.
2050 TEST_F(DiskCacheBackendTest, InvalidEntry5) {
2051   ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2052   SetNewEviction();
2053   SetMask(0x1);  // 2-entry table.
2054   SetMaxSize(0x3000);  // 12 kB.
2055   DisableFirstCleanup();
2056   InitCache();
2057 
2058   TrimDeletedListForTest(false);
2059 }
2060 
2061 TEST_F(DiskCacheBackendTest, InvalidEntry6) {
2062   ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2063   SetMask(0x1);  // 2-entry table.
2064   SetMaxSize(0x3000);  // 12 kB.
2065   DisableFirstCleanup();
2066   InitCache();
2067 
2068   // There is a dirty entry (but marked as clean) at the end, pointing to a
2069   // deleted entry through the hash collision list. We should not re-insert the
2070   // deleted entry into the index table.
2071 
2072   TrimForTest(false);
2073   // The cache should be clean (as detected by CheckCacheIntegrity).
2074 }
2075 
2076 // Tests that we don't hang when there is a loop on the hash collision list.
2077 // The test cache could be a result of bug 69135.
2078 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2079   ASSERT_TRUE(CopyTestCache("list_loop2"));
2080   SetMask(0x1);  // 2-entry table.
2081   SetMaxSize(0x3000);  // 12 kB.
2082   DisableFirstCleanup();
2083   InitCache();
2084 
2085   // The second entry points at itself, and the first entry is not accessible
2086   // through the index, but it is at the head of the LRU.
2087 
2088   disk_cache::Entry* entry;
2089   ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2090   entry->Close();
2091 
2092   TrimForTest(false);
2093   TrimForTest(false);
2094   ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2095   entry->Close();
2096   EXPECT_EQ(1, cache_->GetEntryCount());
2097 }
2098 
2099 // Tests that we don't hang when there is a loop on the hash collision list.
2100 // The test cache could be a result of bug 69135.
2101 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2102   ASSERT_TRUE(CopyTestCache("list_loop3"));
2103   SetMask(0x1);  // 2-entry table.
2104   SetMaxSize(0x3000);  // 12 kB.
2105   DisableFirstCleanup();
2106   InitCache();
2107 
2108   // There is a wide loop of 5 entries.
2109 
2110   disk_cache::Entry* entry;
2111   ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2112 }
2113 
2114 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2115   ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2116   DisableFirstCleanup();
2117   SetNewEviction();
2118   InitCache();
2119 
2120   // The second entry is dirty, but removing it should not corrupt the list.
2121   disk_cache::Entry* entry;
2122   ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2123   ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2124 
2125   // This should not delete the cache.
2126   entry->Doom();
2127   FlushQueueForTest();
2128   entry->Close();
2129 
2130   ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2131   entry->Close();
2132 }
2133 
2134 // Tests handling of corrupt entries by keeping the rankings node around, with
2135 // a fatal failure.
2136 void DiskCacheBackendTest::BackendInvalidEntry7() {
2137   const int kSize = 0x3000;  // 12 kB.
2138   SetMaxSize(kSize * 10);
2139   InitCache();
2140 
2141   std::string first("some key");
2142   std::string second("something else");
2143   disk_cache::Entry* entry;
2144   ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2145   entry->Close();
2146   ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2147 
2148   // Corrupt this entry.
2149   disk_cache::EntryImpl* entry_impl =
2150       static_cast<disk_cache::EntryImpl*>(entry);
2151 
2152   entry_impl->rankings()->Data()->next = 0;
2153   entry_impl->rankings()->Store();
2154   entry->Close();
2155   FlushQueueForTest();
2156   EXPECT_EQ(2, cache_->GetEntryCount());
2157 
2158   // This should detect the bad entry.
2159   EXPECT_NE(net::OK, OpenEntry(second, &entry));
2160   EXPECT_EQ(1, cache_->GetEntryCount());
2161 
2162   // We should delete the cache. The list still has a corrupt node.
2163   void* iter = NULL;
2164   EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2165   FlushQueueForTest();
2166   EXPECT_EQ(0, cache_->GetEntryCount());
2167 }
2168 
2169 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2170   BackendInvalidEntry7();
2171 }
2172 
2173 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2174   SetNewEviction();
2175   BackendInvalidEntry7();
2176 }
2177 
2178 // Tests handling of corrupt entries by keeping the rankings node around, with
2179 // a non-fatal failure.
2180 void DiskCacheBackendTest::BackendInvalidEntry8() {
2181   const int kSize = 0x3000;  // 12 kB
2182   SetMaxSize(kSize * 10);
2183   InitCache();
2184 
2185   std::string first("some key");
2186   std::string second("something else");
2187   disk_cache::Entry* entry;
2188   ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2189   entry->Close();
2190   ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2191 
2192   // Corrupt this entry.
2193   disk_cache::EntryImpl* entry_impl =
2194       static_cast<disk_cache::EntryImpl*>(entry);
2195 
2196   entry_impl->rankings()->Data()->contents = 0;
2197   entry_impl->rankings()->Store();
2198   entry->Close();
2199   FlushQueueForTest();
2200   EXPECT_EQ(2, cache_->GetEntryCount());
2201 
2202   // This should detect the bad entry.
2203   EXPECT_NE(net::OK, OpenEntry(second, &entry));
2204   EXPECT_EQ(1, cache_->GetEntryCount());
2205 
2206   // We should not delete the cache.
2207   void* iter = NULL;
2208   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2209   entry->Close();
2210   EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2211   EXPECT_EQ(1, cache_->GetEntryCount());
2212 }
2213 
2214 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2215   BackendInvalidEntry8();
2216 }
2217 
2218 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2219   SetNewEviction();
2220   BackendInvalidEntry8();
2221 }
2222 
2223 // Tests handling of corrupt entries detected by enumerations. Note that these
2224 // tests (xx9 to xx11) are basically just going through slightly different
2225 // codepaths, so they are tightly coupled with the code, but that is better than
2226 // not testing error handling code.
2227 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2228   const int kSize = 0x3000;  // 12 kB.
2229   SetMaxSize(kSize * 10);
2230   InitCache();
2231 
2232   std::string first("some key");
2233   std::string second("something else");
2234   disk_cache::Entry* entry;
2235   ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2236   entry->Close();
2237   ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2238 
2239   // Corrupt this entry.
2240   disk_cache::EntryImpl* entry_impl =
2241       static_cast<disk_cache::EntryImpl*>(entry);
2242 
2243   entry_impl->entry()->Data()->state = 0xbad;
2244   entry_impl->entry()->Store();
2245   entry->Close();
2246   FlushQueueForTest();
2247   EXPECT_EQ(2, cache_->GetEntryCount());
2248 
2249   if (eviction) {
2250     TrimForTest(false);
2251     EXPECT_EQ(1, cache_->GetEntryCount());
2252     TrimForTest(false);
2253     EXPECT_EQ(1, cache_->GetEntryCount());
2254   } else {
2255     // We should detect the problem through the list, but we should not delete
2256     // the entry, just fail the iteration.
2257     void* iter = NULL;
2258     EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2259 
2260     // Now a full iteration will work, and return one entry.
2261     ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2262     entry->Close();
2263     EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2264 
2265     // This should detect what's left of the bad entry.
2266     EXPECT_NE(net::OK, OpenEntry(second, &entry));
2267     EXPECT_EQ(2, cache_->GetEntryCount());
2268   }
2269   DisableIntegrityCheck();
2270 }
2271 
2272 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2273   BackendInvalidEntry9(false);
2274 }
2275 
2276 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2277   SetNewEviction();
2278   BackendInvalidEntry9(false);
2279 }
2280 
2281 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2282   BackendInvalidEntry9(true);
2283 }
2284 
2285 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2286   SetNewEviction();
2287   BackendInvalidEntry9(true);
2288 }
2289 
2290 // Tests handling of corrupt entries detected by enumerations.
2291 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2292   const int kSize = 0x3000;  // 12 kB.
2293   SetMaxSize(kSize * 10);
2294   SetNewEviction();
2295   InitCache();
2296 
2297   std::string first("some key");
2298   std::string second("something else");
2299   disk_cache::Entry* entry;
2300   ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2301   entry->Close();
2302   ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2303   EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2304   entry->Close();
2305   ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2306 
2307   // Corrupt this entry.
2308   disk_cache::EntryImpl* entry_impl =
2309       static_cast<disk_cache::EntryImpl*>(entry);
2310 
2311   entry_impl->entry()->Data()->state = 0xbad;
2312   entry_impl->entry()->Store();
2313   entry->Close();
2314   ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2315   entry->Close();
2316   EXPECT_EQ(3, cache_->GetEntryCount());
2317 
2318   // We have:
2319   // List 0: third -> second (bad).
2320   // List 1: first.
2321 
2322   if (eviction) {
2323     // Detection order: second -> first -> third.
2324     TrimForTest(false);
2325     EXPECT_EQ(3, cache_->GetEntryCount());
2326     TrimForTest(false);
2327     EXPECT_EQ(2, cache_->GetEntryCount());
2328     TrimForTest(false);
2329     EXPECT_EQ(1, cache_->GetEntryCount());
2330   } else {
2331     // Detection order: third -> second -> first.
2332     // We should detect the problem through the list, but we should not delete
2333     // the entry.
2334     void* iter = NULL;
2335     ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2336     entry->Close();
2337     ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2338     EXPECT_EQ(first, entry->GetKey());
2339     entry->Close();
2340     EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2341   }
2342   DisableIntegrityCheck();
2343 }
2344 
2345 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2346   BackendInvalidEntry10(false);
2347 }
2348 
2349 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2350   BackendInvalidEntry10(true);
2351 }
2352 
2353 // Tests handling of corrupt entries detected by enumerations.
2354 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2355   const int kSize = 0x3000;  // 12 kB.
2356   SetMaxSize(kSize * 10);
2357   SetNewEviction();
2358   InitCache();
2359 
2360   std::string first("some key");
2361   std::string second("something else");
2362   disk_cache::Entry* entry;
2363   ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2364   entry->Close();
2365   ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2366   EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2367   entry->Close();
2368   ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2369   entry->Close();
2370   ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2371   EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2372 
2373   // Corrupt this entry.
2374   disk_cache::EntryImpl* entry_impl =
2375       static_cast<disk_cache::EntryImpl*>(entry);
2376 
2377   entry_impl->entry()->Data()->state = 0xbad;
2378   entry_impl->entry()->Store();
2379   entry->Close();
2380   ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2381   entry->Close();
2382   FlushQueueForTest();
2383   EXPECT_EQ(3, cache_->GetEntryCount());
2384 
2385   // We have:
2386   // List 0: third.
2387   // List 1: second (bad) -> first.
2388 
2389   if (eviction) {
2390     // Detection order: third -> first -> second.
2391     TrimForTest(false);
2392     EXPECT_EQ(2, cache_->GetEntryCount());
2393     TrimForTest(false);
2394     EXPECT_EQ(1, cache_->GetEntryCount());
2395     TrimForTest(false);
2396     EXPECT_EQ(1, cache_->GetEntryCount());
2397   } else {
2398     // Detection order: third -> second.
2399     // We should detect the problem through the list, but we should not delete
2400     // the entry, just fail the iteration.
2401     void* iter = NULL;
2402     ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2403     entry->Close();
2404     EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2405 
2406     // Now a full iteration will work, and return two entries.
2407     ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2408     entry->Close();
2409     ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2410     entry->Close();
2411     EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2412   }
2413   DisableIntegrityCheck();
2414 }
2415 
2416 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2417   BackendInvalidEntry11(false);
2418 }
2419 
2420 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2421   BackendInvalidEntry11(true);
2422 }
2423 
2424 // Tests handling of corrupt entries in the middle of a long eviction run.
2425 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2426   const int kSize = 0x3000;  // 12 kB
2427   SetMaxSize(kSize * 10);
2428   InitCache();
2429 
2430   std::string first("some key");
2431   std::string second("something else");
2432   disk_cache::Entry* entry;
2433   ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2434   entry->Close();
2435   ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2436 
2437   // Corrupt this entry.
2438   disk_cache::EntryImpl* entry_impl =
2439       static_cast<disk_cache::EntryImpl*>(entry);
2440 
2441   entry_impl->entry()->Data()->state = 0xbad;
2442   entry_impl->entry()->Store();
2443   entry->Close();
2444   ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2445   entry->Close();
2446   ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2447   TrimForTest(true);
2448   EXPECT_EQ(1, cache_->GetEntryCount());
2449   entry->Close();
2450   DisableIntegrityCheck();
2451 }
2452 
2453 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2454   BackendTrimInvalidEntry12();
2455 }
2456 
2457 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2458   SetNewEviction();
2459   BackendTrimInvalidEntry12();
2460 }
2461 
2462 // We want to be able to deal with messed up entries on disk.
2463 void DiskCacheBackendTest::BackendInvalidRankings2() {
2464   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2465   DisableFirstCleanup();
2466   InitCache();
2467 
2468   disk_cache::Entry *entry1, *entry2;
2469   EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2470   ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2471   entry2->Close();
2472 
2473   // CheckCacheIntegrity will fail at this point.
2474   DisableIntegrityCheck();
2475 }
2476 
2477 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2478   BackendInvalidRankings2();
2479 }
2480 
2481 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2482   SetNewEviction();
2483   BackendInvalidRankings2();
2484 }
2485 
2486 // If the LRU is corrupt, we delete the cache.
2487 void DiskCacheBackendTest::BackendInvalidRankings() {
2488   disk_cache::Entry* entry;
2489   void* iter = NULL;
2490   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2491   entry->Close();
2492   EXPECT_EQ(2, cache_->GetEntryCount());
2493 
2494   EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2495   FlushQueueForTest();  // Allow the restart to finish.
2496   EXPECT_EQ(0, cache_->GetEntryCount());
2497 }
2498 
2499 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2500   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2501   DisableFirstCleanup();
2502   InitCache();
2503   BackendInvalidRankings();
2504 }
2505 
2506 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2507   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2508   DisableFirstCleanup();
2509   SetNewEviction();
2510   InitCache();
2511   BackendInvalidRankings();
2512 }
2513 
2514 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2515   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2516   DisableFirstCleanup();
2517   InitCache();
2518   SetTestMode();  // Fail cache reinitialization.
2519   BackendInvalidRankings();
2520 }
2521 
2522 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2523   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2524   DisableFirstCleanup();
2525   SetNewEviction();
2526   InitCache();
2527   SetTestMode();  // Fail cache reinitialization.
2528   BackendInvalidRankings();
2529 }
2530 
2531 // If the LRU is corrupt and we have open entries, we disable the cache.
2532 void DiskCacheBackendTest::BackendDisable() {
2533   disk_cache::Entry *entry1, *entry2;
2534   void* iter = NULL;
2535   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2536 
2537   EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2538   EXPECT_EQ(0, cache_->GetEntryCount());
2539   EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2540 
2541   entry1->Close();
2542   FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
2543   FlushQueueForTest();  // This one actually allows that task to complete.
2544 
2545   EXPECT_EQ(0, cache_->GetEntryCount());
2546 }
2547 
2548 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2549   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2550   DisableFirstCleanup();
2551   InitCache();
2552   BackendDisable();
2553 }
2554 
2555 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2556   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2557   DisableFirstCleanup();
2558   SetNewEviction();
2559   InitCache();
2560   BackendDisable();
2561 }
2562 
2563 TEST_F(DiskCacheBackendTest, DisableFailure) {
2564   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2565   DisableFirstCleanup();
2566   InitCache();
2567   SetTestMode();  // Fail cache reinitialization.
2568   BackendDisable();
2569 }
2570 
2571 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2572   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2573   DisableFirstCleanup();
2574   SetNewEviction();
2575   InitCache();
2576   SetTestMode();  // Fail cache reinitialization.
2577   BackendDisable();
2578 }
2579 
2580 // This is another type of corruption on the LRU; disable the cache.
2581 void DiskCacheBackendTest::BackendDisable2() {
2582   EXPECT_EQ(8, cache_->GetEntryCount());
2583 
2584   disk_cache::Entry* entry;
2585   void* iter = NULL;
2586   int count = 0;
2587   while (OpenNextEntry(&iter, &entry) == net::OK) {
2588     ASSERT_TRUE(NULL != entry);
2589     entry->Close();
2590     count++;
2591     ASSERT_LT(count, 9);
2592   };
2593 
2594   FlushQueueForTest();
2595   EXPECT_EQ(0, cache_->GetEntryCount());
2596 }
2597 
2598 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2599   ASSERT_TRUE(CopyTestCache("list_loop"));
2600   DisableFirstCleanup();
2601   InitCache();
2602   BackendDisable2();
2603 }
2604 
2605 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2606   ASSERT_TRUE(CopyTestCache("list_loop"));
2607   DisableFirstCleanup();
2608   SetNewEviction();
2609   InitCache();
2610   BackendDisable2();
2611 }
2612 
2613 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2614   ASSERT_TRUE(CopyTestCache("list_loop"));
2615   DisableFirstCleanup();
2616   InitCache();
2617   SetTestMode();  // Fail cache reinitialization.
2618   BackendDisable2();
2619 }
2620 
2621 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2622   ASSERT_TRUE(CopyTestCache("list_loop"));
2623   DisableFirstCleanup();
2624   SetNewEviction();
2625   InitCache();
2626   SetTestMode();  // Fail cache reinitialization.
2627   BackendDisable2();
2628 }
2629 
2630 // If the index size changes when we disable the cache, we should not crash.
2631 void DiskCacheBackendTest::BackendDisable3() {
2632   disk_cache::Entry *entry1, *entry2;
2633   void* iter = NULL;
2634   EXPECT_EQ(2, cache_->GetEntryCount());
2635   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2636   entry1->Close();
2637 
2638   EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2639   FlushQueueForTest();
2640 
2641   ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2642   entry2->Close();
2643 
2644   EXPECT_EQ(1, cache_->GetEntryCount());
2645 }
2646 
2647 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2648   ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2649   DisableFirstCleanup();
2650   SetMaxSize(20 * 1024 * 1024);
2651   InitCache();
2652   BackendDisable3();
2653 }
2654 
2655 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2656   ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2657   DisableFirstCleanup();
2658   SetMaxSize(20 * 1024 * 1024);
2659   SetNewEviction();
2660   InitCache();
2661   BackendDisable3();
2662 }
2663 
2664 // If we disable the cache, already open entries should work as far as possible.
2665 void DiskCacheBackendTest::BackendDisable4() {
2666   disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2667   void* iter = NULL;
2668   ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2669 
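  // Long keys (2 kB and 20 kB) should be too big to live in the entry record
  // itself, so the GetKey() calls below exercise externally stored keys while
  // the cache is disabled.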
2670   char key2[2000];
2671   char key3[20000];
2672   CacheTestFillBuffer(key2, sizeof(key2), true);
2673   CacheTestFillBuffer(key3, sizeof(key3), true);
2674   key2[sizeof(key2) - 1] = '\0';
2675   key3[sizeof(key3) - 1] = '\0';
2676   ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2677   ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2678 
2679   const int kBufSize = 20000;
2680   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2681   memset(buf->data(), 0, kBufSize);
2682   EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2683   EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2684 
2685   // This line should disable the cache but not delete it.
2686   EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
2687   EXPECT_EQ(0, cache_->GetEntryCount());
2688 
2689   EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2690 
2691   EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2692   EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2693   EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2694 
2695   EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2696   EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2697   EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2698 
2699   std::string key = entry2->GetKey();
2700   EXPECT_EQ(sizeof(key2) - 1, key.size());
2701   key = entry3->GetKey();
2702   EXPECT_EQ(sizeof(key3) - 1, key.size());
2703 
2704   entry1->Close();
2705   entry2->Close();
2706   entry3->Close();
2707   FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
2708   FlushQueueForTest();  // This one actually allows that task to complete.
2709 
2710   EXPECT_EQ(0, cache_->GetEntryCount());
2711 }
2712 
2713 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2714   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2715   DisableFirstCleanup();
2716   InitCache();
2717   BackendDisable4();
2718 }
2719 
2720 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2721   ASSERT_TRUE(CopyTestCache("bad_rankings"));
2722   DisableFirstCleanup();
2723   SetNewEviction();
2724   InitCache();
2725   BackendDisable4();
2726 }
2727 
2728 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2729   MessageLoopHelper helper;
2730 
2731   ASSERT_TRUE(CleanupCacheDir());
2732   scoped_ptr<disk_cache::BackendImpl> cache;
2733   cache.reset(new disk_cache::BackendImpl(
2734       cache_path_, base::MessageLoopProxy::current().get(), NULL));
2735   ASSERT_TRUE(NULL != cache.get());
2736   cache->SetUnitTestMode();
2737   ASSERT_EQ(net::OK, cache->SyncInit());
2738 
2739   // Wait for a callback that never comes... about 2 secs :). The message loop
2740   // has to run to allow invocation of the usage timer.
2741   helper.WaitUntilCacheIoFinished(1);
2742 }
2743 
2744 TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2745   ASSERT_TRUE(CopyTestCache("wrong_version"));
2746 
2747   scoped_ptr<disk_cache::BackendImpl> cache;
2748   cache.reset(new disk_cache::BackendImpl(
2749       cache_path_, base::MessageLoopProxy::current().get(), NULL));
2750   ASSERT_TRUE(NULL != cache.get());
2751   cache->SetUnitTestMode();
2752   ASSERT_NE(net::OK, cache->SyncInit());
2753 
2754   ASSERT_TRUE(NULL == cache->GetTimerForTest());
2755 
2756   DisableIntegrityCheck();
2757 }
2758 
2759 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2760   InitCache();
2761   disk_cache::Entry* entry;
2762   ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2763   entry->Close();
2764   FlushQueueForTest();
2765 
2766   disk_cache::StatsItems stats;
2767   cache_->GetStats(&stats);
2768   EXPECT_FALSE(stats.empty());
2769 
2770   disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2771   EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2772 
2773   cache_.reset();
2774 
2775   // Now open the cache and verify that the stats are still there.
2776   DisableFirstCleanup();
2777   InitCache();
2778   EXPECT_EQ(1, cache_->GetEntryCount());
2779 
2780   stats.clear();
2781   cache_->GetStats(&stats);
2782   EXPECT_FALSE(stats.empty());
2783 
2784   EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2785 }
2786 
2787 void DiskCacheBackendTest::BackendDoomAll() {
2788   InitCache();
2789 
2790   disk_cache::Entry *entry1, *entry2;
2791   ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2792   ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2793   entry1->Close();
2794   entry2->Close();
2795 
2796   ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2797   ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2798 
2799   ASSERT_EQ(4, cache_->GetEntryCount());
2800   EXPECT_EQ(net::OK, DoomAllEntries());
2801   ASSERT_EQ(0, cache_->GetEntryCount());
2802 
2803   // We should stop posting tasks at some point (if we post any).
2804   base::MessageLoop::current()->RunUntilIdle();
2805 
2806   disk_cache::Entry *entry3, *entry4;
2807   EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2808   ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2809   ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2810 
2811   EXPECT_EQ(net::OK, DoomAllEntries());
2812   ASSERT_EQ(0, cache_->GetEntryCount());
2813 
2814   entry1->Close();
2815   entry2->Close();
2816   entry3->Doom();  // The entry should be already doomed, but this must work.
2817   entry3->Close();
2818   entry4->Close();
2819 
2820   // Now try with all references released.
2821   ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2822   ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2823   entry1->Close();
2824   entry2->Close();
2825 
2826   ASSERT_EQ(2, cache_->GetEntryCount());
2827   EXPECT_EQ(net::OK, DoomAllEntries());
2828   ASSERT_EQ(0, cache_->GetEntryCount());
2829 
2830   EXPECT_EQ(net::OK, DoomAllEntries());
2831 }
2832 
2833 TEST_F(DiskCacheBackendTest, DoomAll) {
2834   BackendDoomAll();
2835 }
2836 
2837 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2838   SetNewEviction();
2839   BackendDoomAll();
2840 }
2841 
2842 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2843   SetMemoryOnlyMode();
2844   BackendDoomAll();
2845 }
2846 
2847 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2848   SetCacheType(net::APP_CACHE);
2849   BackendDoomAll();
2850 }
2851 
2852 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2853   SetCacheType(net::SHADER_CACHE);
2854   BackendDoomAll();
2855 }
2856 
2857 // If the index size changes when we doom the cache, we should not crash.
2858 void DiskCacheBackendTest::BackendDoomAll2() {
2859   EXPECT_EQ(2, cache_->GetEntryCount());
2860   EXPECT_EQ(net::OK, DoomAllEntries());
2861 
2862   disk_cache::Entry* entry;
2863   ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2864   entry->Close();
2865 
2866   EXPECT_EQ(1, cache_->GetEntryCount());
2867 }
2868 
2869 TEST_F(DiskCacheBackendTest, DoomAll2) {
2870   ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2871   DisableFirstCleanup();
2872   SetMaxSize(20 * 1024 * 1024);
2873   InitCache();
2874   BackendDoomAll2();
2875 }
2876 
2877 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2878   ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2879   DisableFirstCleanup();
2880   SetMaxSize(20 * 1024 * 1024);
2881   SetNewEviction();
2882   InitCache();
2883   BackendDoomAll2();
2884 }
2885 
2886 // We should be able to create the same entry on multiple simultaneous instances
2887 // of the cache.
2888 TEST_F(DiskCacheTest, MultipleInstances) {
2889   base::ScopedTempDir store1, store2;
2890   ASSERT_TRUE(store1.CreateUniqueTempDir());
2891   ASSERT_TRUE(store2.CreateUniqueTempDir());
2892 
2893   base::Thread cache_thread("CacheThread");
2894   ASSERT_TRUE(cache_thread.StartWithOptions(
2895       base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2896   net::TestCompletionCallback cb;
2897 
2898   const int kNumberOfCaches = 2;
2899   scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2900 
2901   int rv =
2902       disk_cache::CreateCacheBackend(net::DISK_CACHE,
2903                                      net::CACHE_BACKEND_DEFAULT,
2904                                      store1.path(),
2905                                      0,
2906                                      false,
2907                                      cache_thread.message_loop_proxy().get(),
2908                                      NULL,
2909                                      &cache[0],
2910                                      cb.callback());
2911   ASSERT_EQ(net::OK, cb.GetResult(rv));
2912   rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2913                                       net::CACHE_BACKEND_DEFAULT,
2914                                       store2.path(),
2915                                       0,
2916                                       false,
2917                                       cache_thread.message_loop_proxy().get(),
2918                                       NULL,
2919                                       &cache[1],
2920                                       cb.callback());
2921   ASSERT_EQ(net::OK, cb.GetResult(rv));
2922 
2923   ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2924 
2925   std::string key("the first key");
2926   disk_cache::Entry* entry;
2927   for (int i = 0; i < kNumberOfCaches; i++) {
2928     rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2929     ASSERT_EQ(net::OK, cb.GetResult(rv));
2930     entry->Close();
2931   }
2932 }
2933 
2934 // Test the six regions of the curve that determines the max cache size.
2935 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2936   using disk_cache::kDefaultCacheSize;
2937   int64 large_size = kDefaultCacheSize;
2938 
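  // Taken together, the expectations below trace the curve: 80% of available
  // space up to the default size, a plateau at the default, 10% of available,
  // a plateau at 2.5x the default, 10% again, and a final cap at 4x the
  // default size.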
2939   // Region 1: expected = available * 0.8
2940   EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2941             disk_cache::PreferredCacheSize(large_size - 1));
2942   EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2943             disk_cache::PreferredCacheSize(large_size));
2944   EXPECT_EQ(kDefaultCacheSize - 1,
2945             disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
2946 
2947   // Region 2: expected = default_size
2948   EXPECT_EQ(kDefaultCacheSize,
2949             disk_cache::PreferredCacheSize(large_size * 10 / 8));
2950   EXPECT_EQ(kDefaultCacheSize,
2951             disk_cache::PreferredCacheSize(large_size * 10 - 1));
2952 
2953   // Region 3: expected = available * 0.1
2954   EXPECT_EQ(kDefaultCacheSize,
2955             disk_cache::PreferredCacheSize(large_size * 10));
2956   EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
2957             disk_cache::PreferredCacheSize(large_size * 25 - 1));
2958 
2959   // Region 4: expected = default_size * 2.5
2960   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2961             disk_cache::PreferredCacheSize(large_size * 25));
2962   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2963             disk_cache::PreferredCacheSize(large_size * 100 - 1));
2964   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2965             disk_cache::PreferredCacheSize(large_size * 100));
2966   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2967             disk_cache::PreferredCacheSize(large_size * 250 - 1));
2968 
2969   // Region 5: expected = available * 0.1
2970   int64 largest_size = kDefaultCacheSize * 4;
2971   EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2972             disk_cache::PreferredCacheSize(large_size * 250));
2973   EXPECT_EQ(largest_size - 1,
2974             disk_cache::PreferredCacheSize(largest_size * 100 - 1));
2975 
2976   // Region 6: expected = largest possible size
2977   EXPECT_EQ(largest_size,
2978             disk_cache::PreferredCacheSize(largest_size * 100));
2979   EXPECT_EQ(largest_size,
2980             disk_cache::PreferredCacheSize(largest_size * 10000));
2981 }
2982 
2983 // Tests that we can "migrate" a running instance from one experiment group to
2984 // another.
2985 TEST_F(DiskCacheBackendTest, Histograms) {
2986   InitCache();
2987   disk_cache::BackendImpl* backend_ = cache_impl_;  // Needed by the macro.
2988 
2989   for (int i = 1; i < 3; i++) {
2990     CACHE_UMA(HOURS, "FillupTime", i, 28);
2991   }
2992 }
2993 
2994 // Make sure that we keep the total memory used by the internal buffers under
2995 // control.
2996 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
2997   InitCache();
2998   std::string key("the first key");
2999   disk_cache::Entry* entry;
3000   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3001 
3002   const int kSize = 200;
3003   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3004   CacheTestFillBuffer(buffer->data(), kSize, true);
3005 
3006   for (int i = 0; i < 10; i++) {
3007     SCOPED_TRACE(i);
3008     // Allocate 2MB for this entry.
3009     EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
3010     EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
3011     EXPECT_EQ(kSize,
3012               WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3013     EXPECT_EQ(kSize,
3014               WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3015 
3016     // Delete one of the buffers and truncate the other.
3017     EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3018     EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3019 
3020     // Delete the second buffer, writing 10 bytes to disk.
3021     entry->Close();
3022     ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3023   }
3024 
3025   entry->Close();
3026   EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3027 }
3028 
3029 // This test assumes at least 150MB of system memory.
3030 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3031   InitCache();
3032 
3033   const int kOneMB = 1024 * 1024;
3034   EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3035   EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3036 
3037   EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3038   EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3039 
3040   EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3041   EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3042 
3043   cache_impl_->BufferDeleted(kOneMB);
3044   EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3045 
3046   // Check the upper limit.
3047   EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3048 
3049   for (int i = 0; i < 30; i++)
3050     cache_impl_->IsAllocAllowed(0, kOneMB);  // Ignore the result.
3051 
3052   EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
3053 }
3054 
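// TotalBuffersSize2 drives the buffer accounting directly: each granted
// IsAllocAllowed() call adds the requested bytes to a running total,
// BufferDeleted() subtracts them, and requests are refused once the total
// would exceed an upper limit. Below is a minimal bookkeeping model that is
// consistent with the expectations above; kMaxBuffersSize and the exact
// arithmetic are assumptions, the real logic lives in disk_cache::BackendImpl.
#if 0  // Illustrative sketch only; kept out of the build.
class BufferBudget {
 public:
  BufferBudget() : total_(0) {}

  bool IsAllocAllowed(int current, int size) {
    const int64 kMaxBuffersSize = 30 * 1024 * 1024;  // Hypothetical cap.
    if (total_ + size - current > kMaxBuffersSize)
      return false;
    total_ += size - current;
    return true;
  }

  void BufferDeleted(int size) { total_ -= size; }
  int64 GetTotalBuffersSize() const { return total_; }

 private:
  int64 total_;
};
#endif
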
3055 // Tests that sharing of external files works and we are able to delete the
3056 // files when we need to.
3057 TEST_F(DiskCacheBackendTest, FileSharing) {
3058   InitCache();
3059 
3060   disk_cache::Addr address(0x80000001);
3061   ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3062   base::FilePath name = cache_impl_->GetFileName(address);
3063 
3064   scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
3065   file->Init(name);
3066 
3067 #if defined(OS_WIN)
3068   DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3069   DWORD access = GENERIC_READ | GENERIC_WRITE;
3070   base::win::ScopedHandle file2(CreateFile(
3071       name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3072   EXPECT_FALSE(file2.IsValid());
3073 
3074   sharing |= FILE_SHARE_DELETE;
3075   file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3076                        OPEN_EXISTING, 0, NULL));
3077   EXPECT_TRUE(file2.IsValid());
3078 #endif
3079 
3080   EXPECT_TRUE(base::DeleteFile(name, false));
3081 
3082   // We should be able to use the file.
3083   const int kSize = 200;
3084   char buffer1[kSize];
3085   char buffer2[kSize];
3086   memset(buffer1, 't', kSize);
3087   memset(buffer2, 0, kSize);
3088   EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3089   EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3090   EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3091 
3092   EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
3093 }
3094 
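// The backend keeps using the external file above even after it has been
// deleted. On POSIX this works because unlink() only removes the directory
// entry; the inode stays alive while a descriptor remains open. A standalone
// illustration of that semantics using plain POSIX calls (independent of the
// disk_cache::File wrapper; the Windows branch above needs FILE_SHARE_DELETE
// instead):
#if 0  // Illustrative sketch only; kept out of the build.
#include <fcntl.h>
#include <unistd.h>

bool DeleteWhileOpenStillUsable(const char* path) {
  int fd = open(path, O_RDWR | O_CREAT, 0600);
  if (fd < 0)
    return false;
  unlink(path);  // The name is gone, but the inode is still referenced.
  const char data[] = "still usable";
  bool ok =
      write(fd, data, sizeof(data)) == static_cast<ssize_t>(sizeof(data));
  char readback[sizeof(data)];
  ok = ok && pread(fd, readback, sizeof(readback), 0) ==
                 static_cast<ssize_t>(sizeof(readback));
  close(fd);  // Dropping the last reference releases the storage.
  return ok;
}
#endif
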
3095 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3096   InitCache();
3097 
3098   disk_cache::Entry* entry;
3099 
3100   for (int i = 0; i < 2; ++i) {
3101     std::string key = base::StringPrintf("key%d", i);
3102     ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3103     entry->Close();
3104   }
3105 
3106   // Ping the oldest entry.
3107   cache_->OnExternalCacheHit("key0");
3108 
3109   TrimForTest(false);
3110 
3111   // Make sure the older key remains.
3112   EXPECT_EQ(1, cache_->GetEntryCount());
3113   ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3114   entry->Close();
3115 }
3116 
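// The check above relies on OnExternalCacheHit() refreshing the entry's
// position in the eviction ranking, so trimming evicts the entry that was
// not touched even though it was created later. A generic sketch of that
// "touch moves to the most-recently-used end" idea; this is not the backend's
// rankings implementation, just the concept (LruRanking is a made-up name):
#if 0  // Illustrative sketch only; kept out of the build.
#include <list>
#include <map>
#include <string>

class LruRanking {
 public:
  void Insert(const std::string& key) {
    order_.push_front(key);  // Newest entries start at the front.
    position_[key] = order_.begin();
  }
  void Touch(const std::string& key) {  // e.g. an external cache hit.
    order_.splice(order_.begin(), order_, position_[key]);
  }
  std::string EvictOldest() {  // The trim victim is the back of the list.
    std::string victim = order_.back();
    position_.erase(victim);
    order_.pop_back();
    return victim;
  }

 private:
  std::list<std::string> order_;
  std::map<std::string, std::list<std::string>::iterator> position_;
};
#endif
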
3117 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3118   SetCacheType(net::SHADER_CACHE);
3119   InitCache();
3120 
3121   disk_cache::Entry* entry;
3122 
3123   for (int i = 0; i < 2; ++i) {
3124     std::string key = base::StringPrintf("key%d", i);
3125     ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3126     entry->Close();
3127   }
3128 
3129   // Ping the oldest entry.
3130   cache_->OnExternalCacheHit("key0");
3131 
3132   TrimForTest(false);
3133 
3134   // Make sure the older key remains.
3135   EXPECT_EQ(1, cache_->GetEntryCount());
3136   ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3137   entry->Close();
3138 }
3139 
3140 void DiskCacheBackendTest::TracingBackendBasics() {
3141   InitCache();
3142   cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));
3143   cache_impl_ = NULL;
3144   EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
3145   if (!simple_cache_mode_) {
3146     EXPECT_EQ(0, cache_->GetEntryCount());
3147   }
3148 
3149   net::TestCompletionCallback cb;
3150   disk_cache::Entry* entry = NULL;
3151   EXPECT_NE(net::OK, OpenEntry("key", &entry));
3152   EXPECT_TRUE(NULL == entry);
3153 
3154   ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3155   EXPECT_TRUE(NULL != entry);
3156 
3157   disk_cache::Entry* same_entry = NULL;
3158   ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
3159   EXPECT_TRUE(NULL != same_entry);
3160 
3161   if (!simple_cache_mode_) {
3162     EXPECT_EQ(1, cache_->GetEntryCount());
3163   }
3164   entry->Close();
3165   entry = NULL;
3166   same_entry->Close();
3167   same_entry = NULL;
3168 }
3169 
3170 TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
3171   TracingBackendBasics();
3172 }
3173 
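// TracingBackendBasics() swaps cache_ for a wrapper that owns the original
// backend and forwards every call to it, which is why the checks above match
// the plain-backend behavior exactly. The decorator idea in miniature, using
// a made-up two-method interface rather than the real disk_cache::Backend or
// TracingCacheBackend:
#if 0  // Illustrative sketch only; kept out of the build.
struct MiniBackend {
  virtual ~MiniBackend() {}
  virtual int OpenEntry(const std::string& key) = 0;
  virtual int GetEntryCount() const = 0;
};

class TracingMiniBackend : public MiniBackend {
 public:
  explicit TracingMiniBackend(scoped_ptr<MiniBackend> wrapped)
      : wrapped_(wrapped.Pass()) {}

  virtual int OpenEntry(const std::string& key) {
    // Record the operation here (the real wrapper keeps a trace)...
    return wrapped_->OpenEntry(key);  // ...then forward it unchanged.
  }
  virtual int GetEntryCount() const {
    return wrapped_->GetEntryCount();
  }

 private:
  scoped_ptr<MiniBackend> wrapped_;
};
#endif
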
3174 // The Simple Cache backend requires a few guarantees from the filesystem,
3175 // such as atomic renaming of recently opened files. Those guarantees are not
3176 // generally provided on Windows.
3177 #if defined(OS_POSIX)
3178 
3179 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3180   SetCacheType(net::APP_CACHE);
3181   SetSimpleCacheMode();
3182   BackendShutdownWithPendingCreate(false);
3183 }
3184 
3185 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3186   SetCacheType(net::APP_CACHE);
3187   SetSimpleCacheMode();
3188   BackendShutdownWithPendingFileIO(false);
3189 }
3190 
3191 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3192   SetSimpleCacheMode();
3193   BackendBasics();
3194 }
3195 
3196 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3197   SetCacheType(net::APP_CACHE);
3198   SetSimpleCacheMode();
3199   BackendBasics();
3200 }
3201 
3202 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3203   SetSimpleCacheMode();
3204   BackendKeying();
3205 }
3206 
3207 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3208   SetSimpleCacheMode();
3209   SetCacheType(net::APP_CACHE);
3210   BackendKeying();
3211 }
3212 
3213 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3214   SetSimpleCacheMode();
3215   BackendSetSize();
3216 }
3217 
3218 // MacOS has a default open file limit of 256 files, which is incompatible with
3219 // this simple cache test.
3220 #if defined(OS_MACOSX)
3221 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3222 #else
3223 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3224 #endif
3225 
3226 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3227   SetMaxSize(0x100000);
3228   SetSimpleCacheMode();
3229   BackendLoad();
3230 }
3231 
3232 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3233   SetCacheType(net::APP_CACHE);
3234   SetSimpleCacheMode();
3235   SetMaxSize(0x100000);
3236   BackendLoad();
3237 }
3238 
3239 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3240   SetSimpleCacheMode();
3241   BackendDoomRecent();
3242 }
3243 
3244 // crbug.com/330926, crbug.com/370677
3245 TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
3246   SetSimpleCacheMode();
3247   BackendDoomBetween();
3248 }
3249 
3250 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3251   SetSimpleCacheMode();
3252   BackendDoomAll();
3253 }
3254 
3255 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3256   SetCacheType(net::APP_CACHE);
3257   SetSimpleCacheMode();
3258   BackendDoomAll();
3259 }
3260 
3261 TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
3262   SetSimpleCacheMode();
3263   TracingBackendBasics();
3264   // TODO(pasko): implement integrity checking on the Simple Backend.
3265   DisableIntegrityCheck();
3266 }
3267 
3268 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3269   SetSimpleCacheMode();
3270   InitCache();
3271 
3272   const char* key = "the first key";
3273   disk_cache::Entry* entry = NULL;
3274 
3275   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3276   ASSERT_TRUE(entry != NULL);
3277   entry->Close();
3278   entry = NULL;
3279 
3280   // To make sure file creation has completed, we need to call open again so
3281   // that we block until the files have actually been created.
3282   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3283   ASSERT_TRUE(entry != NULL);
3284   entry->Close();
3285   entry = NULL;
3286 
3287   // Delete one of the files in the entry.
3288   base::FilePath to_delete_file = cache_path_.AppendASCII(
3289       disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3290   EXPECT_TRUE(base::PathExists(to_delete_file));
3291   EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3292 
3293   // Failing to open the entry should delete the rest of these files.
3294   ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3295 
3296   // Confirm the rest of the files are gone.
3297   for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3298     base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3299         disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3300     EXPECT_FALSE(base::PathExists(should_be_gone_file));
3301   }
3302 }
3303 
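// SimpleCacheOpenMissingFile leans on the fact that a Simple Cache entry is
// stored as disk_cache::kSimpleEntryFileCount separate files, each named by
// GetFilenameFromKeyAndFileIndex(key, index). A small helper in the same
// spirit that collects every on-disk path belonging to one key
// (FilesForSimpleEntry is a made-up name; the layout calls are the same ones
// used above):
#if 0  // Illustrative sketch only; kept out of the build.
#include <string>
#include <vector>

std::vector<base::FilePath> FilesForSimpleEntry(
    const base::FilePath& cache_dir,
    const std::string& key) {
  std::vector<base::FilePath> files;
  for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
    files.push_back(cache_dir.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
  }
  return files;
}
#endif
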
3304 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3305   SetSimpleCacheMode();
3306   InitCache();
3307 
3308   const char* key = "the first key";
3309   disk_cache::Entry* entry = NULL;
3310 
3311   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3312   disk_cache::Entry* null = NULL;
3313   ASSERT_NE(null, entry);
3314   entry->Close();
3315   entry = NULL;
3316 
3317   // To make sure file creation has completed, we need to call open again so
3318   // that we block until the files have actually been created.
3319   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3320   ASSERT_NE(null, entry);
3321   entry->Close();
3322   entry = NULL;
3323 
3324   // Write an invalid header for stream 0 and stream 1.
3325   base::FilePath entry_file1_path = cache_path_.AppendASCII(
3326       disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3327 
3328   disk_cache::SimpleFileHeader header;
3329   header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3330   EXPECT_EQ(
3331       implicit_cast<int>(sizeof(header)),
3332       base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3333                            sizeof(header)));
3334   ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3335 }
3336 
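// SimpleCacheOpenBadFile corrupts the header magic, which is one of the first
// things an open checks. A standalone sketch of that validation step: read the
// header back and compare the magic. kSimpleInitialMagicNumber is assumed to
// be the good-magic constant from simple_entry_format.h; the real open path
// checks considerably more than this.
#if 0  // Illustrative sketch only; kept out of the build.
bool HasValidSimpleHeaderMagic(const base::FilePath& entry_file) {
  disk_cache::SimpleFileHeader header;
  int read = base::ReadFile(entry_file, reinterpret_cast<char*>(&header),
                            sizeof(header));
  if (read != static_cast<int>(sizeof(header)))
    return false;
  return header.initial_magic_number == disk_cache::kSimpleInitialMagicNumber;
}
#endif
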
3337 // Tests that the Simple Cache Backend fails to initialize with a non-matching
3338 // file structure on disk.
3339 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3340   // Create a cache structure with the |BackendImpl|.
3341   InitCache();
3342   disk_cache::Entry* entry;
3343   const int kSize = 50;
3344   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3345   CacheTestFillBuffer(buffer->data(), kSize, false);
3346   ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3347   ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3348   entry->Close();
3349   cache_.reset();
3350 
3351   // Check that the |SimpleBackendImpl| does not accept this structure.
3352   base::Thread cache_thread("CacheThread");
3353   ASSERT_TRUE(cache_thread.StartWithOptions(
3354       base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3355   disk_cache::SimpleBackendImpl* simple_cache =
3356       new disk_cache::SimpleBackendImpl(cache_path_,
3357                                         0,
3358                                         net::DISK_CACHE,
3359                                         cache_thread.message_loop_proxy().get(),
3360                                         NULL);
3361   net::TestCompletionCallback cb;
3362   int rv = simple_cache->Init(cb.callback());
3363   EXPECT_NE(net::OK, cb.GetResult(rv));
3364   delete simple_cache;
3365   DisableIntegrityCheck();
3366 }
3367 
3368 // Tests that the |BackendImpl| refuses to initialize on top of the files
3369 // generated by the Simple Cache Backend.
3370 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3371   // Create a cache structure with the |SimpleBackendImpl|.
3372   SetSimpleCacheMode();
3373   InitCache();
3374   disk_cache::Entry* entry;
3375   const int kSize = 50;
3376   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3377   CacheTestFillBuffer(buffer->data(), kSize, false);
3378   ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3379   ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3380   entry->Close();
3381   cache_.reset();
3382 
3383   // Check that the |BackendImpl| does not accept this structure.
3384   base::Thread cache_thread("CacheThread");
3385   ASSERT_TRUE(cache_thread.StartWithOptions(
3386       base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3387   disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3388       cache_path_, base::MessageLoopProxy::current().get(), NULL);
3389   cache->SetUnitTestMode();
3390   net::TestCompletionCallback cb;
3391   int rv = cache->Init(cb.callback());
3392   EXPECT_NE(net::OK, cb.GetResult(rv));
3393   delete cache;
3394   DisableIntegrityCheck();
3395 }
3396 
3397 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3398   SetSimpleCacheMode();
3399   BackendFixEnumerators();
3400 }
3401 
3402 // Tests basic functionality of the SimpleBackend implementation of the
3403 // enumeration API.
3404 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3405   SetSimpleCacheMode();
3406   InitCache();
3407   std::set<std::string> key_pool;
3408   ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3409 
3410   // Check that enumeration returns all entries.
3411   std::set<std::string> keys_to_match(key_pool);
3412   void* iter = NULL;
3413   size_t count = 0;
3414   ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3415   cache_->EndEnumeration(&iter);
3416   EXPECT_EQ(key_pool.size(), count);
3417   EXPECT_TRUE(keys_to_match.empty());
3418 
3419   // Check that opening entries does not affect enumeration.
3420   keys_to_match = key_pool;
3421   iter = NULL;
3422   count = 0;
3423   disk_cache::Entry* entry_opened_before;
3424   ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3425   ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3426                                     &iter,
3427                                     &keys_to_match,
3428                                     &count));
3429 
3430   disk_cache::Entry* entry_opened_middle;
3431   ASSERT_EQ(net::OK,
3432             OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3433   ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3434   cache_->EndEnumeration(&iter);
3435   entry_opened_before->Close();
3436   entry_opened_middle->Close();
3437 
3438   EXPECT_EQ(key_pool.size(), count);
3439   EXPECT_TRUE(keys_to_match.empty());
3440 }
3441 
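// EnumerateAndMatchKeys() drives the enumeration API these tests exercise:
// keep asking the backend for the next entry until it runs out, then close
// the iterator with EndEnumeration(). A bare-bones version of that loop,
// assuming the void*-iterator Backend API used in this file
// (OpenNextEntry()/EndEnumeration()):
#if 0  // Illustrative sketch only; kept out of the build.
size_t CountAllEntries(disk_cache::Backend* cache) {
  net::TestCompletionCallback cb;
  void* iter = NULL;
  disk_cache::Entry* entry = NULL;
  size_t count = 0;
  while (cb.GetResult(cache->OpenNextEntry(&iter, &entry, cb.callback())) ==
         net::OK) {
    ++count;
    entry->Close();
    entry = NULL;
  }
  cache->EndEnumeration(&iter);
  return count;
}
#endif
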
3442 // Tests that the enumerations are not affected by dooming an entry in the
3443 // middle.
3444 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3445   SetSimpleCacheMode();
3446   InitCache();
3447   std::set<std::string> key_pool;
3448   ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3449 
3450   // Check that enumeration returns all entries but the doomed one.
3451   std::set<std::string> keys_to_match(key_pool);
3452   void* iter = NULL;
3453   size_t count = 0;
3454   ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3455                                     &iter,
3456                                     &keys_to_match,
3457                                     &count));
3458 
3459   std::string key_to_delete = *(keys_to_match.begin());
3460   DoomEntry(key_to_delete);
3461   keys_to_match.erase(key_to_delete);
3462   key_pool.erase(key_to_delete);
3463   ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3464   cache_->EndEnumeration(&iter);
3465 
3466   EXPECT_EQ(key_pool.size(), count);
3467   EXPECT_TRUE(keys_to_match.empty());
3468 }
3469 
3470 // Tests that enumerations are not affected by corrupt files.
3471 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3472   SetSimpleCacheMode();
3473   InitCache();
3474   std::set<std::string> key_pool;
3475   ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3476 
3477   // Create a corrupt entry. The write/read sequence ensures that the entry's
3478   // files have actually been created on disk before we corrupt them, even in
3479   // the case of optimistic operations.
3480   const std::string key = "the key";
3481   disk_cache::Entry* corrupted_entry;
3482 
3483   ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3484   ASSERT_TRUE(corrupted_entry);
3485   const int kSize = 50;
3486   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3487   CacheTestFillBuffer(buffer->data(), kSize, false);
3488   ASSERT_EQ(kSize,
3489             WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3490   ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3491   corrupted_entry->Close();
3492 
3493   EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3494       key, cache_path_));
3495   EXPECT_EQ(key_pool.size() + 1,
3496             implicit_cast<size_t>(cache_->GetEntryCount()));
3497 
3498   // Check that enumeration returns all entries but the corrupt one.
3499   std::set<std::string> keys_to_match(key_pool);
3500   void* iter = NULL;
3501   size_t count = 0;
3502   ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3503   cache_->EndEnumeration(&iter);
3504 
3505   EXPECT_EQ(key_pool.size(), count);
3506   EXPECT_TRUE(keys_to_match.empty());
3507 }
3508 
3509 #endif  // defined(OS_POSIX)
3510