• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <utility>
6 
7 #include "base/files/file.h"
8 #include "base/files/file_util.h"
9 #include "base/functional/bind.h"
10 #include "base/functional/callback_helpers.h"
11 #include "base/metrics/field_trial.h"
12 #include "base/metrics/field_trial_param_associator.h"
13 #include "base/run_loop.h"
14 #include "base/strings/string_number_conversions.h"
15 #include "base/strings/string_util.h"
16 #include "base/test/metrics/histogram_tester.h"
17 #include "base/test/scoped_feature_list.h"
18 #include "base/threading/platform_thread.h"
19 #include "base/time/time.h"
20 #include "build/build_config.h"
21 #include "net/base/completion_once_callback.h"
22 #include "net/base/io_buffer.h"
23 #include "net/base/net_errors.h"
24 #include "net/base/request_priority.h"
25 #include "net/base/test_completion_callback.h"
26 #include "net/disk_cache/blockfile/backend_impl.h"
27 #include "net/disk_cache/blockfile/entry_impl.h"
28 #include "net/disk_cache/cache_util.h"
29 #include "net/disk_cache/disk_cache_test_base.h"
30 #include "net/disk_cache/disk_cache_test_util.h"
31 #include "net/disk_cache/memory/mem_entry_impl.h"
32 #include "net/disk_cache/simple/simple_backend_impl.h"
33 #include "net/disk_cache/simple/simple_entry_format.h"
34 #include "net/disk_cache/simple/simple_entry_impl.h"
35 #include "net/disk_cache/simple/simple_histogram_enums.h"
36 #include "net/disk_cache/simple/simple_synchronous_entry.h"
37 #include "net/disk_cache/simple/simple_test_util.h"
38 #include "net/disk_cache/simple/simple_util.h"
39 #include "net/test/gtest_util.h"
40 #include "testing/gmock/include/gmock/gmock.h"
41 #include "testing/gtest/include/gtest/gtest.h"
42 
43 using net::test::IsError;
44 using net::test::IsOk;
45 
46 using base::Time;
47 using disk_cache::EntryResult;
48 using disk_cache::EntryResultCallback;
49 using disk_cache::RangeResult;
50 using disk_cache::ScopedEntryPtr;
51 
// Tests that can run with different types of caches.
class DiskCacheEntryTest : public DiskCacheTestWithCache {
 public:
  // These run on the cache's background thread (via RunTaskForTest) and
  // exercise the synchronous, null-callback IO paths.
  void InternalSyncIOBackground(disk_cache::Entry* entry);
  void ExternalSyncIOBackground(disk_cache::Entry* entry);

 protected:
  // Shared test bodies: each TEST_F below configures a cache flavor
  // (blockfile, memory-only, app cache, shader cache, ...) and then calls
  // one of these so the same scenario covers every backend.
  void InternalSyncIO();
  void InternalAsyncIO();
  void ExternalSyncIO();
  void ExternalAsyncIO();
  void ReleaseBuffer(int stream_index);
  void StreamAccess();
  void GetKey();
  void GetTimes(int stream_index);
  void GrowData(int stream_index);
  void TruncateData(int stream_index);
  void ZeroLengthIO(int stream_index);
  void Buffering();
  void SizeAtCreate();
  void SizeChanges(int stream_index);
  void ReuseEntry(int size, int stream_index);
  void InvalidData(int stream_index);
  void ReadWriteDestroyBuffer(int stream_index);
  void DoomNormalEntry();
  void DoomEntryNextToOpenEntry();
  void DoomedEntry(int stream_index);
  void BasicSparseIO();
  void HugeSparseIO();
  void GetAvailableRangeTest();
  void CouldBeSparse();
  void UpdateSparseEntry();
  void DoomSparseEntry();
  void PartialSparseEntry();
  void SparseInvalidArg();
  void SparseClipEnd(int64_t max_index, bool expected_unsupported);
  bool SimpleCacheMakeBadChecksumEntry(const std::string& key, int data_size);
  bool SimpleCacheThirdStreamFileExists(const char* key);
  void SyncDoomEntry(const char* key);
  void CreateEntryWithHeaderBodyAndSideData(const std::string& key,
                                            int data_size);
  void TruncateFileFromEnd(int file_index,
                           const std::string& key,
                           int data_size,
                           int truncate_size);
  void UseAfterBackendDestruction();
  void CloseSparseAfterBackendDestruction();
  void LastUsedTimePersists();
  void TruncateBackwards();
  void ZeroWriteBackwards();
  void SparseOffset64Bit();
};
104 
// This part of the test runs on the background thread.
// Exercises null-callback (synchronous) ReadData/WriteData on streams 0 and 1
// of an entry small enough to use internal (block-file) storage.
void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 10;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  // Stream 0 is empty, so a read returns 0 bytes.
  EXPECT_EQ(0, entry->ReadData(0, 0, buffer1.get(), kSize1,
                               net::CompletionOnceCallback()));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(10, entry->WriteData(0, 0, buffer1.get(), kSize1,
                                 net::CompletionOnceCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(10, entry->ReadData(0, 0, buffer1.get(), kSize1,
                                net::CompletionOnceCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  const int kSize2 = 5000;
  const int kSize3 = 10000;
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);
  auto buffer3 = base::MakeRefCounted<net::IOBufferWithSize>(kSize3);
  memset(buffer3->data(), 0, kSize3);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Write 5000 bytes at offset 1500; stream 1 now ends at 6500.
  EXPECT_EQ(5000, entry->WriteData(1, 1500, buffer2.get(), kSize2,
                                   net::CompletionOnceCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  // Reads are clipped to EOF: 1500 + 5000 - 1511 == 4989 bytes remain.
  EXPECT_EQ(4989, entry->ReadData(1, 1511, buffer2.get(), kSize2,
                                  net::CompletionOnceCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(5000, entry->ReadData(1, 0, buffer2.get(), kSize2,
                                  net::CompletionOnceCallback()));
  // The hole before offset 1500 must read back as zeros.
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  EXPECT_EQ(1500, entry->ReadData(1, 5000, buffer2.get(), kSize2,
                                  net::CompletionOnceCallback()));

  // Reading at or past EOF returns 0.
  EXPECT_EQ(0, entry->ReadData(1, 6500, buffer2.get(), kSize2,
                               net::CompletionOnceCallback()));
  EXPECT_EQ(6500, entry->ReadData(1, 0, buffer3.get(), kSize3,
                                  net::CompletionOnceCallback()));
  // A non-truncating write within the old size leaves the size at 8192.
  EXPECT_EQ(8192, entry->WriteData(1, 0, buffer3.get(), 8192,
                                   net::CompletionOnceCallback(), false));
  EXPECT_EQ(8192, entry->ReadData(1, 0, buffer3.get(), kSize3,
                                  net::CompletionOnceCallback()));
  EXPECT_EQ(8192, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(0, 0, nullptr, 0, net::CompletionOnceCallback(),
                                true));
  EXPECT_EQ(0, entry->WriteData(1, 0, nullptr, 0, net::CompletionOnceCallback(),
                                true));
}
155 
156 // We need to support synchronous IO even though it is not a supported operation
157 // from the point of view of the disk cache's public interface, because we use
158 // it internally, not just by a few tests, but as part of the implementation
159 // (see sparse_control.cc, for example).
InternalSyncIO()160 void DiskCacheEntryTest::InternalSyncIO() {
161   disk_cache::Entry* entry = nullptr;
162   ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
163   ASSERT_TRUE(nullptr != entry);
164 
165   // The bulk of the test runs from within the callback, on the cache thread.
166   RunTaskForTest(base::BindOnce(&DiskCacheEntryTest::InternalSyncIOBackground,
167                                 base::Unretained(this), entry));
168 
169   entry->Doom();
170   entry->Close();
171   FlushQueueForTest();
172   EXPECT_EQ(0, cache_->GetEntryCount());
173 }
174 
TEST_F(DiskCacheEntryTest, InternalSyncIO) {
  InitCache();
  InternalSyncIO();
}

// Same scenario against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalSyncIO();
}
185 
// Async counterpart of InternalSyncIOBackground: issues the same IO pattern
// with real completion callbacks and verifies each completion is delivered to
// the callback object it was bound to.
void DiskCacheEntryTest::InternalAsyncIO() {
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  ASSERT_TRUE(nullptr != entry);

  // Avoid using internal buffers for the test. We have to write something to
  // the entry and close it so that we flush the internal buffer to disk. After
  // that, IO operations will be really hitting the disk. We don't care about
  // the content, so just extending the entry is enough (all extensions zero-
  // fill any holes).
  EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, nullptr, 0, false));
  EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(OpenEntry("the first key", &entry), IsOk());

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  // NOTE(review): callback8 is declared but never bound below (the sequence
  // jumps from callback7 to callback9) — harmless, but intentional-looking.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);
  CallbackTest callback10(&helper, false);
  CallbackTest callback11(&helper, false);
  CallbackTest callback12(&helper, false);
  CallbackTest callback13(&helper, false);

  const int kSize1 = 10;
  const int kSize2 = 5000;
  const int kSize3 = 10000;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);
  auto buffer3 = base::MakeRefCounted<net::IOBufferWithSize>(kSize3);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);

  // Read at EOF of stream 0 completes synchronously with 0 bytes.
  EXPECT_EQ(0, entry->ReadData(0, 15 * 1024, buffer1.get(), kSize1,
                               base::BindOnce(&CallbackTest::Run,
                                              base::Unretained(&callback1))));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  // Every operation below may complete synchronously (returning the byte
  // count) or asynchronously (returning ERR_IO_PENDING); `expected` counts
  // the pending completions WaitUntilCacheIoFinished() must observe.
  int expected = 0;
  int ret = entry->WriteData(
      0, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback2)), false);
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer2->data(), 0, kSize2);
  ret = entry->ReadData(
      0, 0, buffer2.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback3)));
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Write 5000 bytes at offset 1500 of stream 1 (truncate == true).
  ret = entry->WriteData(
      1, 1500, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback4)), true);
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  // 1500 + 5000 - 1511 == 4989 bytes remain before EOF.
  ret = entry->ReadData(
      1, 1511, buffer3.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(
      1, 0, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  memset(buffer3->data(), 0, kSize3);

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  // The hole before offset 1500 reads back as zeros.
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  ret = entry->ReadData(
      1, 5000, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback7)));
  EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1, 0, buffer3.get(), kSize3,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback9)));
  EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->WriteData(
      1, 0, buffer3.get(), 8192,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback10)), true);
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  ret = entry->ReadData(
      1, 0, buffer3.get(), kSize3,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback11)));
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_EQ(8192, entry->GetDataSize(1));

  ret = entry->ReadData(
      0, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback12)));
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1, 0, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback13)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  // No callback object may have been invoked more than once.
  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
335 
TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
  InitCache();
  InternalAsyncIO();
}

// Same scenario against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalAsyncIO();
}
346 
// This part of the test runs on the background thread.
// Like InternalSyncIOBackground but with sizes large enough to use external
// (separate-file) storage instead of block-file storage.
void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 17000;
  const int kSize2 = 25000;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(17000, entry->WriteData(0, 0, buffer1.get(), kSize1,
                                    net::CompletionOnceCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(17000, entry->ReadData(0, 0, buffer1.get(), kSize1,
                                   net::CompletionOnceCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Write 25000 bytes at offset 10000; stream 1 now ends at 35000.
  EXPECT_EQ(25000, entry->WriteData(1, 10000, buffer2.get(), kSize2,
                                    net::CompletionOnceCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  // Clipped to EOF: 10000 + 25000 - 10011 == 24989 bytes remain.
  EXPECT_EQ(24989, entry->ReadData(1, 10011, buffer2.get(), kSize2,
                                   net::CompletionOnceCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(25000, entry->ReadData(1, 0, buffer2.get(), kSize2,
                                   net::CompletionOnceCallback()));
  EXPECT_EQ(5000, entry->ReadData(1, 30000, buffer2.get(), kSize2,
                                  net::CompletionOnceCallback()));

  // Reading at or past EOF returns 0.
  EXPECT_EQ(0, entry->ReadData(1, 35000, buffer2.get(), kSize2,
                               net::CompletionOnceCallback()));
  EXPECT_EQ(17000, entry->ReadData(1, 0, buffer1.get(), kSize1,
                                   net::CompletionOnceCallback()));
  // Extends the stream: 20000 + 17000 == 37000.
  EXPECT_EQ(17000, entry->WriteData(1, 20000, buffer1.get(), kSize1,
                                    net::CompletionOnceCallback(), false));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(0, 0, nullptr, 0, net::CompletionOnceCallback(),
                                true));
  EXPECT_EQ(0, entry->WriteData(1, 0, nullptr, 0, net::CompletionOnceCallback(),
                                true));
}
389 
ExternalSyncIO()390 void DiskCacheEntryTest::ExternalSyncIO() {
391   disk_cache::Entry* entry;
392   ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
393 
394   // The bulk of the test runs from within the callback, on the cache thread.
395   RunTaskForTest(base::BindOnce(&DiskCacheEntryTest::ExternalSyncIOBackground,
396                                 base::Unretained(this), entry));
397 
398   entry->Doom();
399   entry->Close();
400   FlushQueueForTest();
401   EXPECT_EQ(0, cache_->GetEntryCount());
402 }
403 
TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
  InitCache();
  ExternalSyncIO();
}

// Same scenario with the backend's user buffering disabled.
TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalSyncIO();
}

// Same scenario against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalSyncIO();
}
420 
// Async counterpart of ExternalSyncIOBackground: large (external-file) IO
// with completion callbacks, verifying each completion reaches the callback
// object it was bound to.
void DiskCacheEntryTest::ExternalAsyncIO() {
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());

  // Counts operations that returned ERR_IO_PENDING and therefore must be
  // awaited via WaitUntilCacheIoFinished().
  int expected = 0;

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);

  const int kSize1 = 17000;
  const int kSize2 = 25000;
  const int kSize3 = 25000;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);
  auto buffer3 = base::MakeRefCounted<net::IOBufferWithSize>(kSize3);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  int ret = entry->WriteData(
      0, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback1)), false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  // Only the first kSize1 bytes of buffer2 are about to be read into.
  memset(buffer2->data(), 0, kSize1);
  ret = entry->ReadData(
      0, 0, buffer2.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback2)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Write 25000 bytes at offset 10000; stream 1 now ends at 35000.
  ret = entry->WriteData(
      1, 10000, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback3)), false);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer3->data(), 0, kSize3);
  // Clipped to EOF: 10000 + 25000 - 10011 == 24989 bytes remain.
  ret = entry->ReadData(
      1, 10011, buffer3.get(), kSize3,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback4)));
  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(
      1, 0, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  // The hole before offset 10000 must read back as zeros.
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
  ret = entry->ReadData(
      1, 30000, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // Reading at EOF yields 0 bytes.
  ret = entry->ReadData(
      1, 35000, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback7)));
  EXPECT_TRUE(0 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback8)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  // Extends the stream: 20000 + 17000 == 37000.
  ret = entry->WriteData(
      1, 20000, buffer3.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback9)), false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // No callback object may have been invoked more than once.
  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
536 
TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
  InitCache();
  ExternalAsyncIO();
}

// TODO(http://crbug.com/497101): This test is flaky.
#if BUILDFLAG(IS_IOS)
#define MAYBE_ExternalAsyncIONoBuffer DISABLED_ExternalAsyncIONoBuffer
#else
#define MAYBE_ExternalAsyncIONoBuffer ExternalAsyncIONoBuffer
#endif
// Same scenario with the backend's user buffering disabled.
TEST_F(DiskCacheEntryTest, MAYBE_ExternalAsyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalAsyncIO();
}
553 
// Same scenario against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalAsyncIO();
}
559 
560 // Tests that IOBuffers are not referenced after IO completes.
ReleaseBuffer(int stream_index)561 void DiskCacheEntryTest::ReleaseBuffer(int stream_index) {
562   disk_cache::Entry* entry = nullptr;
563   ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
564   ASSERT_TRUE(nullptr != entry);
565 
566   const int kBufferSize = 1024;
567   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
568   CacheTestFillBuffer(buffer->data(), kBufferSize, false);
569 
570   net::ReleaseBufferCompletionCallback cb(buffer.get());
571   int rv = entry->WriteData(
572       stream_index, 0, buffer.get(), kBufferSize, cb.callback(), false);
573   EXPECT_EQ(kBufferSize, cb.GetResult(rv));
574   entry->Close();
575 }
576 
TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ReleaseBuffer(0);
}

// Same scenario against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
  SetMemoryOnlyMode();
  InitCache();
  ReleaseBuffer(0);
}
588 
// Writes distinct data to each of the entry's three streams, verifies the
// streams are independent, that an out-of-range stream index is rejected, and
// that chunked reads (including one clipped at EOF) return the right bytes.
void DiskCacheEntryTest::StreamAccess() {
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  ASSERT_TRUE(nullptr != entry);

  const int kBufferSize = 1024;
  const int kNumStreams = 3;
  scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
  for (auto& reference_buffer : reference_buffers) {
    reference_buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
    CacheTestFillBuffer(reference_buffer->data(), kBufferSize, false);
  }
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  // Round-trip each stream and check the data came back unchanged.
  for (int i = 0; i < kNumStreams; i++) {
    EXPECT_EQ(
        kBufferSize,
        WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
    memset(buffer1->data(), 0, kBufferSize);
    EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
    EXPECT_EQ(
        0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
  }
  // Stream index kNumStreams is out of range.
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
  entry->Close();

  // Open the entry and read it in chunks, including a read past the end.
  ASSERT_THAT(OpenEntry("the first key", &entry), IsOk());
  ASSERT_TRUE(nullptr != entry);
  const int kReadBufferSize = 600;
  const int kFinalReadSize = kBufferSize - kReadBufferSize;
  static_assert(kFinalReadSize < kReadBufferSize,
                "should be exactly two reads");
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kReadBufferSize);
  for (int i = 0; i < kNumStreams; i++) {
    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(kReadBufferSize,
              ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(
        0,
        memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));

    memset(buffer2->data(), 0, kReadBufferSize);
    // Second read crosses EOF, so only kFinalReadSize bytes come back.
    EXPECT_EQ(
        kFinalReadSize,
        ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(0,
              memcmp(reference_buffers[i]->data() + kReadBufferSize,
                     buffer2->data(),
                     kFinalReadSize));
  }

  entry->Close();
}
643 
TEST_F(DiskCacheEntryTest, StreamAccess) {
  InitCache();
  StreamAccess();
}

// Same scenario against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
  SetMemoryOnlyMode();
  InitCache();
  StreamAccess();
}
654 
// Verifies that Entry::GetKey() round-trips keys of increasing length:
// short, 1000 bytes, ~3000 bytes, 19999 bytes, and exactly 0x4000 (16384)
// bytes.
void DiskCacheEntryTest::GetKey() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(key, entry->GetKey()) << "short key";
  entry->Close();

  // Random key bytes; seeding from the clock keeps runs distinct.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);
  char key_buffer[20000];

  CacheTestFillBuffer(key_buffer, 3000, true);
  key_buffer[1000] = '\0';

  key = key_buffer;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
  entry->Close();

  // Extend the same buffer to a ~3000-byte key.
  key_buffer[1000] = 'p';
  key_buffer[3000] = '\0';
  key = key_buffer;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
  key_buffer[19999] = '\0';

  key = key_buffer;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(key == entry->GetKey()) << "long key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, 0x4000, true);
  key_buffer[0x4000] = '\0';

  key = key_buffer;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
  entry->Close();
}
697 
TEST_F(DiskCacheEntryTest, GetKey) {
  InitCache();
  GetKey();
}

// Same scenario against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
  SetMemoryOnlyMode();
  InitCache();
  GetKey();
}
708 
// Verifies how last-used / last-modified timestamps react to writes and
// reads; the policy differs per cache type (see the branches below).
void DiskCacheEntryTest::GetTimes(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;

  Time t1 = Time::Now();
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(entry->GetLastModified() >= t1);
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t2 = Time::Now();
  EXPECT_TRUE(t2 > t1);
  EXPECT_EQ(0, WriteData(entry, stream_index, 200, nullptr, 0, false));
  if (type_ == net::APP_CACHE) {
    // APP_CACHE does not bump the modification time on writes.
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else {
    EXPECT_TRUE(entry->GetLastModified() >= t2);
  }
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t3 = Time::Now();
  EXPECT_TRUE(t3 > t2);
  const int kSize = 200;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 0, buffer.get(), kSize));
  if (type_ == net::APP_CACHE) {
    // APP_CACHE: reads update neither timestamp.
    EXPECT_TRUE(entry->GetLastUsed() < t2);
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else if (type_ == net::SHADER_CACHE) {
    // SHADER_CACHE: reads also leave both timestamps untouched.
    EXPECT_TRUE(entry->GetLastUsed() < t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  } else {
    // Default caches refresh last-used on read but not last-modified.
    EXPECT_TRUE(entry->GetLastUsed() >= t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  }
  entry->Close();
}
747 
TEST_F(DiskCacheEntryTest, GetTimes) {
  InitCache();
  GetTimes(0);
}

// Same scenario against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
  SetMemoryOnlyMode();
  InitCache();
  GetTimes(0);
}

// APP_CACHE has its own timestamp policy; GetTimes branches on type_.
TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  GetTimes(0);
}

// SHADER_CACHE also has its own timestamp policy; GetTimes branches on type_.
TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  GetTimes(0);
}
770 
// Writes increasing amounts of data to one stream and verifies the reported
// size and read-back contents at every step, including after reopening the
// entry. The 10 -> 2000 -> 20000 byte progression is chosen so the blockfile
// backend migrates the data between internal block addresses and an external
// file.
void DiskCacheEntryTest::GrowData(int stream_index) {
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key1, &entry), IsOk());

  const int kSize = 20000;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  memset(buffer2->data(), 0, kSize);

  // Start with a small, recognizable payload so EXPECT_STREQ below can
  // validate the read-back.
  base::strlcpy(buffer1->data(), "the data", kSize);
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, ReadData(entry, stream_index, 0, buffer2.get(), 10));
  EXPECT_STREQ("the data", buffer2->data());
  EXPECT_EQ(10, entry->GetDataSize(stream_index));

  // Grow the open entry: 10 -> 2000 bytes.
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));

  // Grow again: 2000 -> 20000 bytes.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Repeat the same growth steps, but closing and reopening the entry
  // between each size change so the data must survive a round trip.
  memset(buffer2->data(), 0, kSize);
  std::string key2("Second key");
  ASSERT_THAT(CreateEntry(key2, &entry), IsOk());
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, entry->GetDataSize(stream_index));
  entry->Close();

  // Go from an internal address to a bigger block size.
  ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
  entry->Close();
  memset(buffer2->data(), 0, kSize);

  // Go from an internal address to an external one.
  ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Double check the size from disk.
  ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));

  // Now extend the entry without actual data.
  EXPECT_EQ(0, WriteData(entry, stream_index, 45500, buffer1.get(), 0, false));
  entry->Close();

  // And check again from disk.
  ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  EXPECT_EQ(45500, entry->GetDataSize(stream_index));
  entry->Close();
}
840 
// GrowData() on the default blockfile cache (write buffering enabled).
TEST_F(DiskCacheEntryTest, GrowData) {
  InitCache();
  GrowData(0);
}

// GrowData() with the backend's user-data buffering disabled, so writes go
// straight to storage.
TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  GrowData(0);
}

// GrowData() on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
  SetMemoryOnlyMode();
  InitCache();
  GrowData(0);
}
857 
// Exercises the |truncate| flag of WriteData(): shorter non-truncating writes
// must not shrink a stream, truncating writes must, and zero-length
// truncating writes set the size to the write offset. Covers both internal
// block storage and external files.
void DiskCacheEntryTest::TruncateData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple truncation:
  EXPECT_EQ(200, WriteData(entry, stream_index, 0, buffer1.get(), 200, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  // A shorter write without truncate must leave the size at 200.
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  // The same write with truncate shrinks the stream.
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, true));
  EXPECT_EQ(100, entry->GetDataSize(stream_index));
  // Zero-length truncating writes set the size to the offset.
  EXPECT_EQ(0, WriteData(entry, stream_index, 50, buffer1.get(), 0, true));
  EXPECT_EQ(50, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // Go to an external file.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), 20000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
  memset(buffer2->data(), 0, kSize2);

  // External file truncation
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, true));
  EXPECT_EQ(18000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 17500, buffer1.get(), 0, true));
  EXPECT_EQ(17500, entry->GetDataSize(stream_index));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer2.get(), 600));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
  // Bytes [0, 1000) must be unaffected by the truncating write at 1000.
  EXPECT_EQ(1000, ReadData(entry, stream_index, 0, buffer2.get(), 1000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
      << "Preserves previous data";

  // Go from external file to zero length.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));

  entry->Close();
}
922 
// TruncateData() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, TruncateData) {
  InitCache();
  TruncateData(0);
}

// TruncateData() with the blockfile backend's write buffering disabled.
TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  TruncateData(0);
}

// TruncateData() on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
  SetMemoryOnlyMode();
  InitCache();
  TruncateData(0);
}
939 
// Verifies zero-length reads and writes: they succeed with a null buffer,
// zero-length writes at an offset extend the stream, and the implicitly
// created gap reads back as zeros.
void DiskCacheEntryTest::ZeroLengthIO(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  EXPECT_EQ(0, ReadData(entry, stream_index, 0, nullptr, 0));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, nullptr, 0, false));

  // This write should extend the entry.
  EXPECT_EQ(0, WriteData(entry, stream_index, 1000, nullptr, 0, false));
  EXPECT_EQ(0, ReadData(entry, stream_index, 500, nullptr, 0));
  // Zero-length read past the end is still a zero-byte success.
  EXPECT_EQ(0, ReadData(entry, stream_index, 2000, nullptr, 0));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));

  // Extend much further (into external-file territory) the same way.
  EXPECT_EQ(0, WriteData(entry, stream_index, 100000, nullptr, 0, true));
  EXPECT_EQ(0, ReadData(entry, stream_index, 50000, nullptr, 0));
  EXPECT_EQ(100000, entry->GetDataSize(stream_index));

  // Let's verify the actual content.
  const int kSize = 20;
  const char zeros[kSize] = {};
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);

  // Pre-fill with garbage each time so a no-op read would be detected.
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 500, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 5000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 50000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  entry->Close();
}
977 
// ZeroLengthIO() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
  InitCache();
  ZeroLengthIO(0);
}

// ZeroLengthIO() with the blockfile backend's write buffering disabled.
TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ZeroLengthIO(0);
}

// ZeroLengthIO() on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
  SetMemoryOnlyMode();
  InitCache();
  ZeroLengthIO(0);
}
994 
995 // Tests that we handle the content correctly when buffering, a feature of the
996 // standard cache that permits fast responses to certain reads.
void DiskCacheEntryTest::Buffering() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // Stream 1 is used throughout; kSize-byte chunks are written at growing,
  // deliberately sparse offsets so some data lives in the write buffer and
  // some on disk at any given time.
  const int kSize = 200;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  // Write a little more and read what we wrote before.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Now go to an external file.
  EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
  entry->Close();

  // Write something else and verify old data.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
  // buffer2 is re-randomized before every read so stale contents can't make
  // a failed read look like a pass.
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Extend the file some more.
  EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
  entry->Close();

  // And now make sure that we can deal with data in both places (ram/disk).
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));

  // We should not overwrite the data at 18000 with this.
  EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Write at 22900 so the last 100 bytes of this chunk overlap the chunk
  // previously written at 23000; reads near the end are truncated to the
  // current size (23000 + kSize).
  EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  // Extend the file again and read before without closing the entry.
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  entry->Close();
}
1072 
// Buffering() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, Buffering) {
  InitCache();
  Buffering();
}

// Buffering() with the backend's write buffering disabled; the same
// read-back guarantees must hold without the in-memory buffer.
TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  Buffering();
}
1083 
1084 // Checks that entries are zero length when created.
SizeAtCreate()1085 void DiskCacheEntryTest::SizeAtCreate() {
1086   const char key[]  = "the first key";
1087   disk_cache::Entry* entry;
1088   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1089 
1090   const int kNumStreams = 3;
1091   for (int i = 0; i < kNumStreams; ++i)
1092     EXPECT_EQ(0, entry->GetDataSize(i));
1093   entry->Close();
1094 }
1095 
// SizeAtCreate() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, SizeAtCreate) {
  InitCache();
  SizeAtCreate();
}

// SizeAtCreate() on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
  SetMemoryOnlyMode();
  InitCache();
  SizeAtCreate();
}
1106 
1107 // Some extra tests to make sure that buffering works properly when changing
1108 // the entry size.
void DiskCacheEntryTest::SizeChanges(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 200;
  const char zeros[kSize] = {};
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  // Seed three sparse chunks; everything between them is implicit zero fill.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 17000, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 23000, buffer1.get(), kSize, true));
  entry->Close();

  // Extend the file and read between the old size and the new write.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(23000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  // The gap created by extending (between 23000 + kSize and 25000) must
  // read back as zeros.
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));

  // Read at the end of the old file size.
  EXPECT_EQ(
      kSize,
      ReadData(entry, stream_index, 23000 + kSize - 35, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));

  // Read slightly before the last write.
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24900, buffer2.get(), kSize));
  // First 100 bytes are still gap (zeros); the rest is the chunk at 25000.
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Extend the entry a little more.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 26000, buffer1.get(), kSize, true));
  EXPECT_EQ(26000 + kSize, entry->GetDataSize(stream_index));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 25900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  // Reads are clipped at the new, smaller size.
  EXPECT_EQ(
      28,
      ReadData(entry, stream_index, 25000 + kSize - 28, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));

  // Reduce the size with a buffer that is not extending the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24000, buffer1.get(), kSize, false));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24500, buffer1.get(), kSize, true));
  EXPECT_EQ(24500 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 23900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size below the old size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 19000, buffer1.get(), kSize, true));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 18900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Verify that the actual file is truncated.
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));

  // Extend the newly opened file with a zero length write, expect zero fill.
  EXPECT_EQ(
      0,
      WriteData(entry, stream_index, 20000 + kSize, buffer1.get(), 0, false));
  EXPECT_EQ(kSize,
            ReadData(entry, stream_index, 19000 + kSize, buffer1.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));

  entry->Close();
}
1202 
// SizeChanges() on stream 1 of the default blockfile cache.
TEST_F(DiskCacheEntryTest, SizeChanges) {
  InitCache();
  SizeChanges(1);
}

// SizeChanges() with the backend's write buffering disabled.
TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  SizeChanges(1);
}
1213 
1214 // Write more than the total cache capacity but to a single entry. |size| is the
1215 // amount of bytes to write each time.
ReuseEntry(int size,int stream_index)1216 void DiskCacheEntryTest::ReuseEntry(int size, int stream_index) {
1217   std::string key1("the first key");
1218   disk_cache::Entry* entry;
1219   ASSERT_THAT(CreateEntry(key1, &entry), IsOk());
1220 
1221   entry->Close();
1222   std::string key2("the second key");
1223   ASSERT_THAT(CreateEntry(key2, &entry), IsOk());
1224 
1225   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(size);
1226   CacheTestFillBuffer(buffer->data(), size, false);
1227 
1228   for (int i = 0; i < 15; i++) {
1229     EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer.get(), 0, true));
1230     EXPECT_EQ(size,
1231               WriteData(entry, stream_index, 0, buffer.get(), size, false));
1232     entry->Close();
1233     ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
1234   }
1235 
1236   entry->Close();
1237   ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
1238   entry->Close();
1239 }
1240 
// ReuseEntry() with 20 KiB chunks (external-file storage) on a 200 KiB cache.
TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

// Same as above on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

// ReuseEntry() with 10 KiB chunks (internal-block storage) on a 100 KiB cache.
TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}

// Same as above on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}
1266 
1267 // Reading somewhere that was not written should return zeros.
void DiskCacheEntryTest::InvalidData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  const int kSize3 = 20000;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);
  auto buffer3 = base::MakeRefCounted<net::IOBufferWithSize>(kSize3);

  // buffer1 holds random data to write; buffer2 stays all-zero and is the
  // expected value for every never-written region; buffer3 receives reads.
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple data grow:
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 400, buffer1.get(), 200, false));
  EXPECT_EQ(600, entry->GetDataSize(stream_index));
  // Bytes [0, 400) were never written; they must read as zeros.
  EXPECT_EQ(100, ReadData(entry, stream_index, 300, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // The entry is now on disk. Load it and extend it.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 800, buffer1.get(), 200, false));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 700, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // This time using truncate.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 1800, buffer1.get(), 200, true));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 1500, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));

  // Go to an external file.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 19800, buffer1.get(), 200, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(4000, ReadData(entry, stream_index, 14000, buffer3.get(), 4000));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  // This region was just written, so it must match buffer1, not zeros.
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer3.get(), 600));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));

  // Extend it again.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 2000, buffer1.get(), 600, false));
  EXPECT_EQ(2600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 1800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  // And again (with truncation flag).
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 3000, buffer1.get(), 600, true));
  EXPECT_EQ(3600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 2800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  entry->Close();
}
1338 
// InvalidData() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, InvalidData) {
  InitCache();
  InvalidData(0);
}

// InvalidData() with the blockfile backend's write buffering disabled.
TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  InvalidData(0);
}

// InvalidData() on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
  SetMemoryOnlyMode();
  InitCache();
  InvalidData(0);
}
1355 
1356 // Tests that the cache preserves the buffer of an IO operation.
ReadWriteDestroyBuffer(int stream_index)1357 void DiskCacheEntryTest::ReadWriteDestroyBuffer(int stream_index) {
1358   std::string key("the first key");
1359   disk_cache::Entry* entry;
1360   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1361 
1362   const int kSize = 200;
1363   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
1364   CacheTestFillBuffer(buffer->data(), kSize, false);
1365 
1366   net::TestCompletionCallback cb;
1367   EXPECT_EQ(net::ERR_IO_PENDING,
1368             entry->WriteData(
1369                 stream_index, 0, buffer.get(), kSize, cb.callback(), false));
1370 
1371   // Release our reference to the buffer.
1372   buffer = nullptr;
1373   EXPECT_EQ(kSize, cb.WaitForResult());
1374 
1375   // And now test with a Read().
1376   buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
1377   CacheTestFillBuffer(buffer->data(), kSize, false);
1378 
1379   EXPECT_EQ(
1380       net::ERR_IO_PENDING,
1381       entry->ReadData(stream_index, 0, buffer.get(), kSize, cb.callback()));
1382   buffer = nullptr;
1383   EXPECT_EQ(kSize, cb.WaitForResult());
1384 
1385   entry->Close();
1386 }
1387 
// ReadWriteDestroyBuffer() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
  InitCache();
  ReadWriteDestroyBuffer(0);
}
1392 
DoomNormalEntry()1393 void DiskCacheEntryTest::DoomNormalEntry() {
1394   std::string key("the first key");
1395   disk_cache::Entry* entry;
1396   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1397   entry->Doom();
1398   entry->Close();
1399 
1400   const int kSize = 20000;
1401   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
1402   CacheTestFillBuffer(buffer->data(), kSize, true);
1403   buffer->data()[19999] = '\0';
1404 
1405   key = buffer->data();
1406   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1407   EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1408   EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
1409   entry->Doom();
1410   entry->Close();
1411 
1412   FlushQueueForTest();
1413   EXPECT_EQ(0, cache_->GetEntryCount());
1414 }
1415 
// DoomNormalEntry() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, DoomEntry) {
  InitCache();
  DoomNormalEntry();
}

// DoomNormalEntry() on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomNormalEntry();
}
1426 
1427 // Tests dooming an entry that's linked to an open entry.
DoomEntryNextToOpenEntry()1428 void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
1429   disk_cache::Entry* entry1;
1430   disk_cache::Entry* entry2;
1431   ASSERT_THAT(CreateEntry("fixed", &entry1), IsOk());
1432   entry1->Close();
1433   ASSERT_THAT(CreateEntry("foo", &entry1), IsOk());
1434   entry1->Close();
1435   ASSERT_THAT(CreateEntry("bar", &entry1), IsOk());
1436   entry1->Close();
1437 
1438   ASSERT_THAT(OpenEntry("foo", &entry1), IsOk());
1439   ASSERT_THAT(OpenEntry("bar", &entry2), IsOk());
1440   entry2->Doom();
1441   entry2->Close();
1442 
1443   ASSERT_THAT(OpenEntry("foo", &entry2), IsOk());
1444   entry2->Doom();
1445   entry2->Close();
1446   entry1->Close();
1447 
1448   ASSERT_THAT(OpenEntry("fixed", &entry1), IsOk());
1449   entry1->Close();
1450 }
1451 
// DoomEntryNextToOpenEntry() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
  InitCache();
  DoomEntryNextToOpenEntry();
}

// Same scenario with the new eviction algorithm enabled.
TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
  SetNewEviction();
  InitCache();
  DoomEntryNextToOpenEntry();
}

// Same scenario with the APP_CACHE cache type.
TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  DoomEntryNextToOpenEntry();
}
1468 
1469 // Verify that basic operations work as expected with doomed entries.
void DiskCacheEntryTest::DoomedEntry(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Doom();

  // The doomed entry is no longer counted by the backend, but the open
  // handle must keep working.
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
  // Record a baseline time before touching the entry; AddDelay() guarantees
  // the clock advances past it.
  Time initial = Time::Now();
  AddDelay();

  const int kSize1 = 2000;
  const int kSize2 = 2000;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Writes, reads, key access and timestamps all still work after Doom().
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
  EXPECT_EQ(key, entry->GetKey());
  EXPECT_TRUE(initial < entry->GetLastModified());
  EXPECT_TRUE(initial < entry->GetLastUsed());

  entry->Close();
}
1498 
// DoomedEntry() on the default blockfile cache.
TEST_F(DiskCacheEntryTest, DoomedEntry) {
  InitCache();
  DoomedEntry(0);
}

// DoomedEntry() on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomedEntry(0);
}
1509 
1510 // Tests that we discard entries if the data is missing.
TEST_F(DiskCacheEntryTest, MissingData) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // Write to an external file.
  const int kSize = 20000;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  FlushQueueForTest();

  // Delete the backing file behind the cache's back. 0x80000001 is the
  // cache address of the first external data file — presumably where the
  // 20000-byte write above landed; confirm against Addr's encoding if this
  // ever changes.
  disk_cache::Addr address(0x80000001);
  base::FilePath name = cache_impl_->GetFileName(address);
  EXPECT_TRUE(base::DeleteFile(name));

  // Attempt to read the data.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
            ReadData(entry, 0, 0, buffer.get(), kSize));
  entry->Close();

  // The entry should be gone.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
}
1539 
1540 // Test that child entries in a memory cache backend are not visible from
1541 // enumerations.
TEST_F(DiskCacheEntryTest,MemoryOnlyEnumerationWithSparseEntries)1542 TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
1543   SetMemoryOnlyMode();
1544   InitCache();
1545 
1546   const int kSize = 4096;
1547   auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
1548   CacheTestFillBuffer(buf->data(), kSize, false);
1549 
1550   std::string key("the first key");
1551   disk_cache::Entry* parent_entry;
1552   ASSERT_THAT(CreateEntry(key, &parent_entry), IsOk());
1553 
1554   // Writes to the parent entry.
1555   EXPECT_EQ(kSize, parent_entry->WriteSparseData(
1556                        0, buf.get(), kSize, net::CompletionOnceCallback()));
1557 
1558   // This write creates a child entry and writes to it.
1559   EXPECT_EQ(kSize, parent_entry->WriteSparseData(
1560                        8192, buf.get(), kSize, net::CompletionOnceCallback()));
1561 
1562   parent_entry->Close();
1563 
1564   // Perform the enumerations.
1565   std::unique_ptr<TestIterator> iter = CreateIterator();
1566   disk_cache::Entry* entry = nullptr;
1567   int count = 0;
1568   while (iter->OpenNextEntry(&entry) == net::OK) {
1569     ASSERT_TRUE(entry != nullptr);
1570     ++count;
1571     disk_cache::MemEntryImpl* mem_entry =
1572         reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
1573     EXPECT_EQ(disk_cache::MemEntryImpl::EntryType::kParent, mem_entry->type());
1574     mem_entry->Close();
1575   }
1576   EXPECT_EQ(1, count);
1577 }
1578 
1579 // Writes |buf_1| to offset and reads it back as |buf_2|.
VerifySparseIO(disk_cache::Entry * entry,int64_t offset,net::IOBuffer * buf_1,int size,net::IOBuffer * buf_2)1580 void VerifySparseIO(disk_cache::Entry* entry,
1581                     int64_t offset,
1582                     net::IOBuffer* buf_1,
1583                     int size,
1584                     net::IOBuffer* buf_2) {
1585   net::TestCompletionCallback cb;
1586 
1587   memset(buf_2->data(), 0, size);
1588   int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1589   EXPECT_EQ(0, cb.GetResult(ret));
1590 
1591   ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
1592   EXPECT_EQ(size, cb.GetResult(ret));
1593 
1594   ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1595   EXPECT_EQ(size, cb.GetResult(ret));
1596 
1597   EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
1598 }
1599 
1600 // Reads |size| bytes from |entry| at |offset| and verifies that they are the
1601 // same as the content of the provided |buffer|.
VerifyContentSparseIO(disk_cache::Entry * entry,int64_t offset,char * buffer,int size)1602 void VerifyContentSparseIO(disk_cache::Entry* entry,
1603                            int64_t offset,
1604                            char* buffer,
1605                            int size) {
1606   net::TestCompletionCallback cb;
1607 
1608   auto buf_1 = base::MakeRefCounted<net::IOBufferWithSize>(size);
1609   memset(buf_1->data(), 0, size);
1610   int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
1611   EXPECT_EQ(size, cb.GetResult(ret));
1612   EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
1613 }
1614 
// Round-trips sparse writes at widely separated offsets (0, 4 MB, 32 GB) and
// verifies the data survives closing and reopening the entry.
void DiskCacheEntryTest::BasicSparseIO() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 2048;
  auto buf_1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buf_2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());

  // Write at offset 0x400000 (4 MB).
  VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());

  // Write at offset 0x800000000 (32 GB).
  VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());

  entry->Close();

  // Check everything again.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
  entry->Close();
}
1643 
// Basic sparse I/O on the default (disk) backend.
TEST_F(DiskCacheEntryTest, BasicSparseIO) {
  InitCache();
  BasicSparseIO();
}
1648 
// Basic sparse I/O on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  BasicSparseIO();
}
1654 
// A single large (1.2 MB) sparse write that spans multiple internal child
// entries, verified both immediately and after reopen.
void DiskCacheEntryTest::HugeSparseIO() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // Write 1.2 MB so that we cover multiple entries.
  const int kSize = 1200 * 1024;
  auto buf_1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buf_2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
  entry->Close();

  // Check it again.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
  entry->Close();
}
1675 
// Large multi-child sparse I/O on the default (disk) backend.
TEST_F(DiskCacheEntryTest, HugeSparseIO) {
  InitCache();
  HugeSparseIO();
}
1680 
// Large multi-child sparse I/O on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  HugeSparseIO();
}
1686 
// Exercises Entry::GetAvailableRange() against two adjacent sparse writes:
// queries inside, before, after, and across the stored data, plus length
// clamping and a maximal-length query. The expectations depend on the exact
// sequence of writes above each query, so statement order matters.
void DiskCacheEntryTest::GetAvailableRangeTest() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 16 * 1024;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));

  // We stop at the first empty block.
  TestRangeResultCompletionCallback cb;
  RangeResult result = cb.GetResult(
      entry->GetAvailableRange(0x20F0000, kSize * 2, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(kSize, result.available_len);
  EXPECT_EQ(0x20F0000, result.start);

  // Nothing was written near offset 0.
  result = cb.GetResult(entry->GetAvailableRange(0, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);

  // A query ending exactly at the first write's start finds nothing.
  result = cb.GetResult(
      entry->GetAvailableRange(0x20F0000 - kSize, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);

  // A query covering the whole prefix reports where the data begins.
  result = cb.GetResult(entry->GetAvailableRange(0, 0x2100000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(kSize, result.available_len);
  EXPECT_EQ(0x20F0000, result.start);

  // We should be able to Read based on the results of GetAvailableRange.
  net::TestCompletionCallback read_cb;
  result =
      cb.GetResult(entry->GetAvailableRange(0x2100000, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  int rv =
      entry->ReadSparseData(result.start, buf.get(), kSize, read_cb.callback());
  EXPECT_EQ(0, read_cb.GetResult(rv));

  // A query starting in the middle of stored data reports the remainder.
  result =
      cb.GetResult(entry->GetAvailableRange(0x20F2000, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0x2000, result.available_len);
  EXPECT_EQ(0x20F2000, result.start);
  EXPECT_EQ(0x2000, ReadSparseData(entry, result.start, buf.get(), kSize));

  // Make sure that we respect the |len| argument.
  result = cb.GetResult(
      entry->GetAvailableRange(0x20F0001 - kSize, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1, result.available_len);
  EXPECT_EQ(0x20F0000, result.start);

  // Use very small ranges. Write at offset 50.
  const int kTinyLen = 10;
  EXPECT_EQ(kTinyLen, WriteSparseData(entry, 50, buf.get(), kTinyLen));

  result = cb.GetResult(
      entry->GetAvailableRange(kTinyLen * 2, kTinyLen, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  EXPECT_EQ(kTinyLen * 2, result.start);

  // Get a huge range with maximum boundary.
  result = cb.GetResult(entry->GetAvailableRange(
      0x2100000, std::numeric_limits<int32_t>::max(), cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);

  entry->Close();
}
1764 
// GetAvailableRange behavior on the default (disk) backend.
TEST_F(DiskCacheEntryTest, GetAvailableRange) {
  InitCache();
  GetAvailableRangeTest();
}
1769 
// GetAvailableRange behavior on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();
  GetAvailableRangeTest();
}
1775 
TEST_F(DiskCacheEntryTest, GetAvailableRangeBlockFileDiscontinuous) {
  // crbug.com/791056 --- blockfile problem when there is a sub-KiB write before
  // a bunch of full 1KiB blocks, and a GetAvailableRange is issued to which
  // both are potentially relevant.
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  auto buf_2k = base::MakeRefCounted<net::IOBufferWithSize>(2 * 1024);
  CacheTestFillBuffer(buf_2k->data(), 2 * 1024, false);

  const int kSmallSize = 612;  // sub-1k
  auto buf_small = base::MakeRefCounted<net::IOBufferWithSize>(kSmallSize);
  CacheTestFillBuffer(buf_small->data(), kSmallSize, false);

  // Sets some bits for blocks representing 1K ranges [1024, 3072),
  // which will be relevant for the next GetAvailableRange call.
  EXPECT_EQ(2 * 1024, WriteSparseData(entry, /* offset = */ 1024, buf_2k.get(),
                                      /* size = */ 2 * 1024));

  // Now record a partial write from start of the first kb.
  EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 0,
                                        buf_small.get(), kSmallSize));

  // Try to query a range starting from that block 0.
  // The cache tracks: [0, 612) [1024, 3072).
  // The request is for: [812, 2059) so response should be [1024, 2059), which
  // has length = 1035. Previously this returned a negative number for rv.
  TestRangeResultCompletionCallback cb;
  RangeResult result =
      cb.GetResult(entry->GetAvailableRange(812, 1247, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1035, result.available_len);
  EXPECT_EQ(1024, result.start);

  // Now query [512, 1536). This matches both [512, 612) and [1024, 1536),
  // so this should return [512, 612).
  result = cb.GetResult(entry->GetAvailableRange(512, 1024, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(100, result.available_len);
  EXPECT_EQ(512, result.start);

  // Now query next portion, [612, 1636). This now just should produce
  // [1024, 1636)
  result = cb.GetResult(entry->GetAvailableRange(612, 1024, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(612, result.available_len);
  EXPECT_EQ(1024, result.start);

  // Do a continuous small write, this one at [3072, 3684).
  // This means the cache tracks [1024, 3072) via bitmaps and [3072, 3684)
  // as the last write.
  EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 3072,
                                        buf_small.get(), kSmallSize));

  // Query [2048, 4096). Should get [2048, 3684)
  result = cb.GetResult(entry->GetAvailableRange(2048, 2048, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1636, result.available_len);
  EXPECT_EQ(2048, result.start);

  // Now write at [4096, 4708). Since only one sub-kb thing is tracked, this
  // now tracks  [1024, 3072) via bitmaps and [4096, 4708) as the last write.
  EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 4096,
                                        buf_small.get(), kSmallSize));

  // Query [2048, 4096). Should get [2048, 3072)
  result = cb.GetResult(entry->GetAvailableRange(2048, 2048, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1024, result.available_len);
  EXPECT_EQ(2048, result.start);

  // Query 2K more after that: [3072, 5120). Should get [4096, 4708)
  result = cb.GetResult(entry->GetAvailableRange(3072, 2048, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(612, result.available_len);
  EXPECT_EQ(4096, result.start);

  // Also double-check that offsets within later children are correctly
  // computed.
  EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 0x200400,
                                        buf_small.get(), kSmallSize));
  result =
      cb.GetResult(entry->GetAvailableRange(0x100000, 0x200000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(kSmallSize, result.available_len);
  EXPECT_EQ(0x200400, result.start);

  entry->Close();
}
1868 
// Tests that non-sequential writes that are not aligned with the minimum sparse
// data granularity (1024 bytes) do in fact result in dropped data.
TEST_F(DiskCacheEntryTest, SparseWriteDropped) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 180;
  auto buf_1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buf_2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Do small writes (180 bytes) that get increasingly close to a 1024-byte
  // boundary. All data should be dropped until a boundary is crossed, at which
  // point the data after the boundary is saved (at least for a while).
  int offset = 1024 - 500;
  int rv = 0;
  net::TestCompletionCallback cb;
  TestRangeResultCompletionCallback range_cb;
  RangeResult result;
  for (int i = 0; i < 5; i++) {
    // Check result of last GetAvailableRange.
    EXPECT_EQ(0, result.available_len);

    rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));

    // Nothing should be stored below the write offset.
    result = range_cb.GetResult(
        entry->GetAvailableRange(offset - 100, kSize, range_cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(0, result.available_len);

    result = range_cb.GetResult(
        entry->GetAvailableRange(offset, kSize, range_cb.callback()));
    if (!result.available_len) {
      // If the range reports empty, a direct read must also return 0 bytes.
      rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
      EXPECT_EQ(0, cb.GetResult(rv));
    }
    offset += 1024 * i + 100;
  }

  // The last write started 100 bytes below a boundary, so there should be 80
  // bytes after the boundary.
  EXPECT_EQ(80, result.available_len);
  EXPECT_EQ(1024 * 7, result.start);
  rv = entry->ReadSparseData(result.start, buf_2.get(), kSize, cb.callback());
  EXPECT_EQ(80, cb.GetResult(rv));
  EXPECT_EQ(0, memcmp(buf_1.get()->data() + 100, buf_2.get()->data(), 80));

  // And even that part is dropped when another write changes the offset.
  offset = result.start;
  rv = entry->WriteSparseData(0, buf_1.get(), kSize, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));

  result = range_cb.GetResult(
      entry->GetAvailableRange(offset, kSize, range_cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  entry->Close();
}
1930 
// Tests that small sequential writes are not dropped.
// NOTE(review): test name has a typo ("Squential"); renaming it would change
// the registered gtest name, so it is left as-is.
TEST_F(DiskCacheEntryTest, SparseSquentialWriteNotDropped) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 180;
  auto buf_1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buf_2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Any starting offset is fine as long as it is 1024-bytes aligned.
  int rv = 0;
  RangeResult result;
  net::TestCompletionCallback cb;
  TestRangeResultCompletionCallback range_cb;
  int64_t offset = 1024 * 11;
  for (; offset < 20000; offset += kSize) {
    rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));

    // Each write must be immediately visible and readable in full.
    result = range_cb.GetResult(
        entry->GetAvailableRange(offset, kSize, range_cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(kSize, result.available_len);
    EXPECT_EQ(offset, result.start);

    rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));
    EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));
  }

  entry->Close();
  FlushQueueForTest();

  // Verify again the last write made.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  offset -= kSize;
  result = range_cb.GetResult(
      entry->GetAvailableRange(offset, kSize, range_cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(kSize, result.available_len);
  EXPECT_EQ(offset, result.start);

  rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));

  entry->Close();
}
1982 
// Verifies Entry::CouldBeSparse(): true for an entry with sparse data (even
// after reopen), false for a regular entry regardless of stream writes.
void DiskCacheEntryTest::CouldBeSparse() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 16 * 1024;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));

  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // The sparse flag must survive close/reopen.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // Now verify a regular entry.
  key.assign("another key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_FALSE(entry->CouldBeSparse());

  // Writing all three regular streams must not make the entry sparse.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));

  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();
}
2018 
// CouldBeSparse() semantics on the default (disk) backend.
TEST_F(DiskCacheEntryTest, CouldBeSparse) {
  InitCache();
  CouldBeSparse();
}
2023 
// CouldBeSparse() semantics on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
  SetMemoryOnlyMode();
  InitCache();
  CouldBeSparse();
}
2029 
// Sparse I/O at offsets that are not aligned to the memory backend's internal
// block size, including a large write from a misaligned offset.
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  auto buf_1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buf_2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // This loop writes back to back starting from offset 0 and 9000.
  for (int i = 0; i < kSize; i += 1024) {
    // Wrap a view into buf_1 at offset |i| so each chunk is written from the
    // matching position of the source buffer.
    scoped_refptr<net::WrappedIOBuffer> buf_3 =
        base::MakeRefCounted<net::WrappedIOBuffer>(buf_1->data() + i,
                                                   kSize - i);
    VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
    VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
  }

  // Make sure we have data written.
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);

  // This tests a large write that spans 3 entries from a misaligned offset.
  VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());

  entry->Close();
}
2061 
// GetAvailableRange() on the memory backend with writes at misaligned offsets,
// including a range spanning two child entries.
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  disk_cache::Entry* entry;
  std::string key("the first key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // Writes in the middle of an entry.
  EXPECT_EQ(1024, entry->WriteSparseData(0, buf.get(), 1024,
                                         net::CompletionOnceCallback()));
  EXPECT_EQ(1024, entry->WriteSparseData(5120, buf.get(), 1024,
                                         net::CompletionOnceCallback()));
  EXPECT_EQ(1024, entry->WriteSparseData(10000, buf.get(), 1024,
                                         net::CompletionOnceCallback()));

  // Writes in the middle of an entry and spans 2 child entries.
  EXPECT_EQ(8192, entry->WriteSparseData(50000, buf.get(), 8192,
                                         net::CompletionOnceCallback()));

  TestRangeResultCompletionCallback cb;
  // Test that we stop at a discontinuous child at the second block.
  RangeResult result =
      cb.GetResult(entry->GetAvailableRange(0, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1024, result.available_len);
  EXPECT_EQ(0, result.start);

  // Test that number of bytes is reported correctly when we start from the
  // middle of a filled region.
  result = cb.GetResult(entry->GetAvailableRange(512, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(512, result.available_len);
  EXPECT_EQ(512, result.start);

  // Test that we found bytes in the child of next block.
  result = cb.GetResult(entry->GetAvailableRange(1024, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1024, result.available_len);
  EXPECT_EQ(5120, result.start);

  // Test that the desired length is respected. It starts within a filled
  // region.
  result = cb.GetResult(entry->GetAvailableRange(5500, 512, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(512, result.available_len);
  EXPECT_EQ(5500, result.start);

  // Test that the desired length is respected. It starts before a filled
  // region.
  result = cb.GetResult(entry->GetAvailableRange(5000, 620, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(500, result.available_len);
  EXPECT_EQ(5120, result.start);

  // Test that multiple blocks are scanned.
  result = cb.GetResult(entry->GetAvailableRange(40000, 20000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(8192, result.available_len);
  EXPECT_EQ(50000, result.start);

  entry->Close();
}
2129 
UpdateSparseEntry()2130 void DiskCacheEntryTest::UpdateSparseEntry() {
2131   std::string key("the first key");
2132   disk_cache::Entry* entry1;
2133   ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
2134 
2135   const int kSize = 2048;
2136   auto buf_1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
2137   auto buf_2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
2138   CacheTestFillBuffer(buf_1->data(), kSize, false);
2139 
2140   // Write at offset 0.
2141   VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
2142   entry1->Close();
2143 
2144   // Write at offset 2048.
2145   ASSERT_THAT(OpenEntry(key, &entry1), IsOk());
2146   VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());
2147 
2148   disk_cache::Entry* entry2;
2149   ASSERT_THAT(CreateEntry("the second key", &entry2), IsOk());
2150 
2151   entry1->Close();
2152   entry2->Close();
2153   FlushQueueForTest();
2154   if (memory_only_ || simple_cache_mode_)
2155     EXPECT_EQ(2, cache_->GetEntryCount());
2156   else
2157     EXPECT_EQ(3, cache_->GetEntryCount());
2158 }
2159 
// Sparse entry update across close/reopen on the default (disk) backend.
TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
  InitCache();
  UpdateSparseEntry();
}
2164 
// Sparse entry update across close/reopen on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  UpdateSparseEntry();
}
2170 
// Dooms sparse entries both while open and after being fully saved, and
// verifies all of their (possibly hidden child) entries are removed. The
// waiting/retry logic at the end is timing-sensitive; keep the order intact.
void DiskCacheEntryTest::DoomSparseEntry() {
  std::string key1("the first key");
  std::string key2("the second key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());

  const int kSize = 4 * 1024;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64_t offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
    // Keep the second map under the default size.
    if (i < 9)
      EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));

    // Quadrupling the offset spreads the writes across many children.
    offset *= 4;
  }

  // The blockfile backend counts each sparse child as a separate entry.
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(15, cache_->GetEntryCount());

  // Doom the first entry while it's still open.
  entry1->Doom();
  entry1->Close();
  entry2->Close();

  // Doom the second entry after it's fully saved.
  EXPECT_THAT(DoomEntry(key2), IsOk());

  // Make sure we do all needed work. This may fail for entry2 if between Close
  // and DoomEntry the system decides to remove all traces of the file from the
  // system cache so we don't see that there is pending IO.
  base::RunLoop().RunUntilIdle();

  if (memory_only_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  } else {
    if (5 == cache_->GetEntryCount()) {
      // Most likely we are waiting for the result of reading the sparse info
      // (it's always async on Posix so it is easy to miss). Unfortunately we
      // don't have any signal to watch for so we can only wait.
      base::PlatformThread::Sleep(base::Milliseconds(500));
      base::RunLoop().RunUntilIdle();
    }
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}
2224 
// Doom-sparse-entry checks on the disk backend, running cache operations on
// the current thread so completion order is deterministic.
TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
  UseCurrentThread();
  InitCache();
  DoomSparseEntry();
}
2230 
// Doom-sparse-entry checks on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomSparseEntry();
}
2236 
// A TestCompletionCallback wrapper that deletes the cache from within the
// callback.  The way TestCompletionCallback works means that all tasks (even
// new ones) are executed by the message loop before returning to the caller so
// the only way to simulate a race is to execute what we want on the callback.
class SparseTestCompletionCallback: public net::TestCompletionCallback {
 public:
  // Takes ownership of |cache| so it can be destroyed mid-operation.
  explicit SparseTestCompletionCallback(
      std::unique_ptr<disk_cache::Backend> cache)
      : cache_(std::move(cache)) {}

  SparseTestCompletionCallback(const SparseTestCompletionCallback&) = delete;
  SparseTestCompletionCallback& operator=(const SparseTestCompletionCallback&) =
      delete;

 private:
  // Destroys the backend before recording the result, simulating the backend
  // being deleted while the operation's completion is being delivered.
  void SetResult(int result) override {
    cache_.reset();
    TestCompletionCallback::SetResult(result);
  }

  std::unique_ptr<disk_cache::Backend> cache_;
};
2259 
// Tests that we don't crash when the backend is deleted while we are working
// deleting the sub-entries of a sparse entry.
TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
  UseCurrentThread();
  InitCache();
  std::string key("the key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 4 * 1024;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64_t offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, entry->WriteSparseData(offset, buf.get(), kSize,
                                            net::CompletionOnceCallback()));
    offset *= 4;
  }
  // The parent plus its sparse children (blockfile backend).
  EXPECT_EQ(9, cache_->GetEntryCount());

  entry->Close();
  // Keep a raw pointer for issuing the doom, then hand ownership of the
  // backend to the callback, which will destroy it mid-doom.
  disk_cache::Backend* cache = cache_.get();
  SparseTestCompletionCallback cb(TakeCache());
  int rv = cache->DoomEntry(key, net::HIGHEST, cb.callback());
  EXPECT_THAT(rv, IsError(net::ERR_IO_PENDING));
  EXPECT_THAT(cb.WaitForResult(), IsOk());
}
2289 
// Shared test body: exercises sparse I/O that is not aligned to the sparse
// block size. Many expected byte counts differ between the blockfile backend
// and the memory-only / simple backends, hence the branches on
// |memory_only_ || simple_cache_mode_|; the exact values are
// implementation-derived.
void DiskCacheEntryTest::PartialSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // We should be able to deal with IO that is not aligned to the block size
  // of a sparse entry, at least to write a big range without leaving holes.
  const int kSize = 4 * 1024;
  const int kSmallSize = 128;
  auto buf1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // The first write is just to extend the entry. The third write occupies
  // a 1KB block partially, it may not be written internally depending on the
  // implementation.
  EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
  EXPECT_EQ(kSmallSize,
            WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
  entry->Close();
  // Reopen so subsequent reads go through a freshly loaded entry.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  auto buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buf2->data(), 0, kSize);
  EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));

  // Only the last 500 bytes of the write at offset 500 fall in
  // [kSize, 2 * kSize).
  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));

  // This read should not change anything.
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
  else
    EXPECT_EQ(0, ReadSparseData(entry, 24000, buf2.get(), kSize));

  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));

  TestRangeResultCompletionCallback cb;
  RangeResult result;
  if (memory_only_ || simple_cache_mode_) {
    result = cb.GetResult(entry->GetAvailableRange(0, 600, cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(100, result.available_len);
    EXPECT_EQ(500, result.start);
  } else {
    result = cb.GetResult(entry->GetAvailableRange(0, 2048, cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(1024, result.available_len);
    EXPECT_EQ(1024, result.start);
  }
  result = cb.GetResult(entry->GetAvailableRange(kSize, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(500, result.available_len);
  EXPECT_EQ(kSize, result.start);
  result =
      cb.GetResult(entry->GetAvailableRange(20 * 1024, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(3616, result.available_len);
  else
    EXPECT_EQ(3072, result.available_len);

  EXPECT_EQ(20 * 1024, result.start);

  // 1. Query before a filled 1KB block.
  // 2. Query within a filled 1KB block.
  // 3. Query beyond a filled 1KB block.
  if (memory_only_ || simple_cache_mode_) {
    result =
        cb.GetResult(entry->GetAvailableRange(19400, kSize, cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(3496, result.available_len);
    EXPECT_EQ(20000, result.start);
  } else {
    result =
        cb.GetResult(entry->GetAvailableRange(19400, kSize, cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(3016, result.available_len);
    EXPECT_EQ(20480, result.start);
  }
  result = cb.GetResult(entry->GetAvailableRange(3073, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1523, result.available_len);
  EXPECT_EQ(3073, result.start);
  result = cb.GetResult(entry->GetAvailableRange(4600, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  EXPECT_EQ(4600, result.start);

  // Now make another write and verify that there is no hole in between.
  EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
  result = cb.GetResult(entry->GetAvailableRange(1024, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(7 * 1024 + 500, result.available_len);
  EXPECT_EQ(1024, result.start);
  EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));

  entry->Close();
}
2393 
// Runs the shared PartialSparseEntry() body against the default backend.
TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
  InitCache();
  PartialSparseEntry();
}
2398 
// Runs the shared PartialSparseEntry() body against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  PartialSparseEntry();
}
2404 
SparseInvalidArg()2405 void DiskCacheEntryTest::SparseInvalidArg() {
2406   std::string key("key");
2407   disk_cache::Entry* entry = nullptr;
2408   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
2409 
2410   const int kSize = 2048;
2411   auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
2412   CacheTestFillBuffer(buf->data(), kSize, false);
2413 
2414   EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
2415             WriteSparseData(entry, -1, buf.get(), kSize));
2416   EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
2417             WriteSparseData(entry, 0, buf.get(), -1));
2418 
2419   EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
2420             ReadSparseData(entry, -1, buf.get(), kSize));
2421   EXPECT_EQ(net::ERR_INVALID_ARGUMENT, ReadSparseData(entry, 0, buf.get(), -1));
2422 
2423   int64_t start_out;
2424   EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
2425             GetAvailableRange(entry, -1, kSize, &start_out));
2426   EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
2427             GetAvailableRange(entry, 0, -1, &start_out));
2428 
2429   int rv = WriteSparseData(
2430       entry, std::numeric_limits<int64_t>::max() - kSize + 1, buf.get(), kSize);
2431   // Blockfile rejects anything over 64GiB with
2432   // net::ERR_CACHE_OPERATION_NOT_SUPPORTED, which is also OK here, as it's not
2433   // an overflow or something else nonsensical.
2434   EXPECT_TRUE(rv == net::ERR_INVALID_ARGUMENT ||
2435               rv == net::ERR_CACHE_OPERATION_NOT_SUPPORTED);
2436 
2437   entry->Close();
2438 }
2439 
// Invalid sparse arguments on the default (blockfile) backend.
TEST_F(DiskCacheEntryTest, SparseInvalidArg) {
  InitCache();
  SparseInvalidArg();
}
2444 
// Invalid sparse arguments on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlySparseInvalidArg) {
  SetMemoryOnlyMode();
  InitCache();
  SparseInvalidArg();
}
2450 
// Invalid sparse arguments on the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleSparseInvalidArg) {
  SetSimpleCacheMode();
  InitCache();
  SparseInvalidArg();
}
2456 
// Shared test body: writes a range ending exactly at |max_index| and checks
// that reads past it get clipped. Backends that cannot address offsets that
// high (|expect_unsupported| == true) must fail the write/read with
// ERR_CACHE_OPERATION_NOT_SUPPORTED instead.
void DiskCacheEntryTest::SparseClipEnd(int64_t max_index,
                                       bool expect_unsupported) {
  std::string key("key");
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 1024;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Read buffer is twice as large so the read request extends past the data.
  auto read_buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize * 2);
  CacheTestFillBuffer(read_buf->data(), kSize * 2, false);

  // Write the last kSize bytes of the addressable range.
  const int64_t kOffset = max_index - kSize;
  int rv = WriteSparseData(entry, kOffset, buf.get(), kSize);
  EXPECT_EQ(
      rv, expect_unsupported ? net::ERR_CACHE_OPERATION_NOT_SUPPORTED : kSize);

  // Try to read further than offset range, should get clipped (if supported).
  rv = ReadSparseData(entry, kOffset, read_buf.get(), kSize * 2);
  if (expect_unsupported) {
    EXPECT_EQ(rv, net::ERR_CACHE_OPERATION_NOT_SUPPORTED);
  } else {
    EXPECT_EQ(kSize, rv);
    EXPECT_EQ(0, memcmp(buf->data(), read_buf->data(), kSize));
  }

  TestRangeResultCompletionCallback cb;
  RangeResult result = cb.GetResult(
      entry->GetAvailableRange(kOffset - kSize, kSize * 3, cb.callback()));
  if (expect_unsupported) {
    // GetAvailableRange just returns nothing found, not an error.
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(result.available_len, 0);
  } else {
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(kSize, result.available_len);
    EXPECT_EQ(kOffset, result.start);
  }

  entry->Close();
}
2499 
// Clipping at the very end of the 64-bit offset space on the blockfile
// backend, which cannot address it.
TEST_F(DiskCacheEntryTest, SparseClipEnd) {
  InitCache();

  // Blockfile refuses to deal with sparse indices over 64GiB.
  SparseClipEnd(std::numeric_limits<int64_t>::max(),
                /*expect_unsupported=*/true);
}
2507 
// Clipping right at blockfile's 64GiB sparse address-space boundary, plus
// checks that operations beyond the boundary fail cleanly.
TEST_F(DiskCacheEntryTest, SparseClipEnd2) {
  InitCache();

  const int64_t kLimit = 64ll * 1024 * 1024 * 1024;
  // Separate test for blockfile for indices right at the edge of its address
  // space limit. kLimit must match kMaxEndOffset in sparse_control.cc
  SparseClipEnd(kLimit, /*expect_unsupported=*/false);

  // Test with things after kLimit, too, which isn't an issue for backends
  // supporting the entire 64-bit offset range.
  std::string key("key2");
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 1024;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Try to write after --- fails.
  int rv = WriteSparseData(entry, kLimit, buf.get(), kSize);
  EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);

  // Similarly for read.
  rv = ReadSparseData(entry, kLimit, buf.get(), kSize);
  EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);

  // GetAvailableRange just returns nothing.
  TestRangeResultCompletionCallback cb;
  RangeResult result =
      cb.GetResult(entry->GetAvailableRange(kLimit, kSize * 3, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  entry->Close();
}
2542 
// The in-memory backend supports the full 64-bit offset range, so clipping at
// the very end must succeed.
TEST_F(DiskCacheEntryTest, MemoryOnlySparseClipEnd) {
  SetMemoryOnlyMode();
  InitCache();
  SparseClipEnd(std::numeric_limits<int64_t>::max(),
                /*expect_unsupported=*/false);
}
2549 
// The simple cache backend supports the full 64-bit offset range, so clipping
// at the very end must succeed.
TEST_F(DiskCacheEntryTest, SimpleSparseClipEnd) {
  SetSimpleCacheMode();
  InitCache();
  SparseClipEnd(std::numeric_limits<int64_t>::max(),
                /*expect_unsupported=*/false);
}
2556 
// Tests that corrupt sparse children are removed automatically.
TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 4 * 1024;
  auto buf1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // Three writes ~1MB apart produce three child entries plus the parent.
  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  entry->Close();
  EXPECT_EQ(4, cache_->GetEntryCount());

  // Collect the keys of two child entries (any entry whose key differs from
  // the parent's).
  std::unique_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  std::string child_keys[2];
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(entry != nullptr);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_keys[count++] = entry->GetKey();
    entry->Close();
  }
  // Corrupt both children by overwriting the start of their control stream.
  for (const auto& child_key : child_keys) {
    ASSERT_THAT(OpenEntry(child_key, &entry), IsOk());
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));

  // We never touched this one.
  EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache_->GetEntryCount());
}
2607 
// Starts a large sparse write, cancels it mid-flight, and verifies that all
// queued ReadyForSparseIO() notifications still fire.
TEST_F(DiskCacheEntryTest, CancelSparseIO) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 40 * 1024;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // This will open and write two "real" entries.
  net::TestCompletionCallback cb1, cb2, cb3, cb4;
  int rv = entry->WriteSparseData(
      1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
  EXPECT_THAT(rv, IsError(net::ERR_IO_PENDING));

  TestRangeResultCompletionCallback cb5;
  RangeResult result =
      cb5.GetResult(entry->GetAvailableRange(0, kSize, cb5.callback()));
  if (!cb1.have_result()) {
    // We may or may not have finished writing to the entry. If we have not,
    // we cannot start another operation at this time.
    // NOTE(review): |rv| still holds the WriteSparseData result asserted to
    // be ERR_IO_PENDING above; this check likely intends to inspect the
    // GetAvailableRange result instead — confirm upstream.
    EXPECT_THAT(rv, IsError(net::ERR_CACHE_OPERATION_NOT_SUPPORTED));
  }

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_THAT(entry->ReadyForSparseIO(cb2.callback()),
              IsError(net::ERR_IO_PENDING));
  EXPECT_THAT(entry->ReadyForSparseIO(cb3.callback()),
              IsError(net::ERR_IO_PENDING));
  entry->CancelSparseIO();  // Should be a no op at this point.
  EXPECT_THAT(entry->ReadyForSparseIO(cb4.callback()),
              IsError(net::ERR_IO_PENDING));

  // While the cancellation is pending, further sparse operations are refused.
  if (!cb1.have_result()) {
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->ReadSparseData(result.start, buf.get(), kSize,
                                    net::CompletionOnceCallback()));
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->WriteSparseData(result.start, buf.get(), kSize,
                                     net::CompletionOnceCallback()));
  }

  // Now see if we receive all notifications. Note that we should not be able
  // to write everything (unless the timing of the system is really weird).
  rv = cb1.WaitForResult();
  EXPECT_TRUE(rv == 4096 || rv == kSize);
  EXPECT_THAT(cb2.WaitForResult(), IsOk());
  EXPECT_THAT(cb3.WaitForResult(), IsOk());
  EXPECT_THAT(cb4.WaitForResult(), IsOk());

  // The cancelled region should report no available data.
  result = cb5.GetResult(
      entry->GetAvailableRange(result.start, kSize, cb5.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  entry->Close();
}
2667 
// Tests that we perform sanity checks on an entry's key. Note that there are
// other tests that exercise sanity checks by using saved corrupt files.
TEST_F(DiskCacheEntryTest, KeySanityCheck) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // Reach into the blockfile entry's on-disk record to corrupt it directly.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // We have reserved space for a short key (one block), let's say that the key
  // takes more than one block, and remove the NULLs after the actual key.
  store->key_len = 800;
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}
2693 
// Variant of KeySanityCheck: corrupt the inline key bytes without changing
// the stored key length.
TEST_F(DiskCacheEntryTest, KeySanityCheck2) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // Fill in the rest of inline key store with non-nulls. Unlike in
  // KeySanityCheck, this does not change the length to identify it as
  // stored under |long_key|.
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}
2717 
// Variant of KeySanityCheck for keys stored in an external file: clobbering
// the terminating NUL there must be recovered from gracefully.
TEST_F(DiskCacheEntryTest, KeySanityCheck3) {
  const size_t kVeryLong = 40 * 1024;
  UseCurrentThread();
  InitCache();
  std::string key(kVeryLong, 'a');
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // Test meaningful when using long keys; and also want this to be
  // an external file to avoid needing to duplicate offset math here.
  disk_cache::Addr key_addr(store->long_key);
  ASSERT_TRUE(key_addr.is_initialized());
  ASSERT_TRUE(key_addr.is_separate_file());

  // Close the entry before messing up its files.
  entry->Close();

  // Mess up the terminating null in the external key file.
  auto key_file =
      base::MakeRefCounted<disk_cache::File>(true /* want sync ops*/);
  ASSERT_TRUE(key_file->Init(cache_impl_->GetFileName(key_addr)));

  ASSERT_TRUE(key_file->Write("b", 1u, kVeryLong));
  key_file = nullptr;

  // This case gets graceful recovery.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // Make sure the key object isn't messed up.
  EXPECT_EQ(kVeryLong, strlen(entry->GetKey().data()));
  entry->Close();
}
2754 
// Simple cache: run the shared InternalAsyncIO() scenario.
TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  InternalAsyncIO();
}
2760 
// Simple cache: run the shared ExternalAsyncIO() scenario.
TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  ExternalAsyncIO();
}
2766 
// Simple cache: run ReleaseBuffer() once per stream, on a fresh cache each
// time.
TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ReleaseBuffer(i);
  }
}
2775 
// Simple cache: run the shared StreamAccess() scenario.
TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
  SetSimpleCacheMode();
  InitCache();
  StreamAccess();
}
2781 
// Simple cache: run the shared GetKey() scenario.
TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
  SetSimpleCacheMode();
  InitCache();
  GetKey();
}
2787 
// Simple cache: run GetTimes() once per stream, on a fresh cache each time.
TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    GetTimes(i);
  }
}
2796 
// Simple cache: run GrowData() once per stream, on a fresh cache each time.
TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    GrowData(i);
  }
}
2805 
// Simple cache: run TruncateData() once per stream, on a fresh cache each
// time.
TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    TruncateData(i);
  }
}
2814 
// Simple cache: run ZeroLengthIO() once per stream, on a fresh cache each
// time.
TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ZeroLengthIO(i);
  }
}
2823 
// Simple cache: run the shared SizeAtCreate() scenario.
TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
  SetSimpleCacheMode();
  InitCache();
  SizeAtCreate();
}
2829 
// Simple cache: reuse a 20KB ("external"-sized) entry per stream under a
// 200KB cache.
TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(200 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ReuseEntry(20 * 1024, i);
  }
}
2839 
// Simple cache: reuse a 10KB ("internal"-sized) entry per stream under a
// 100KB cache.
TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(100 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ReuseEntry(10 * 1024, i);
  }
}
2849 
// Simple cache must accept a 5MiB entry even under a 20MiB cache, but reject
// anything a single byte larger.
TEST_F(DiskCacheEntryTest, SimpleCacheGiantEntry) {
  const int kBufSize = 32 * 1024;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, false);

  // Make sure SimpleCache can write up to 5MiB entry even with a 20MiB cache
  // size that Android WebView uses at the time of this test's writing.
  SetSimpleCacheMode();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();

  {
    std::string key1("the first key");
    disk_cache::Entry* entry1 = nullptr;
    ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());

    // Writing the final kBufSize bytes extends the entry to exactly 5MiB.
    const int kSize1 = 5 * 1024 * 1024;
    EXPECT_EQ(kBufSize, WriteData(entry1, 1 /* stream */, kSize1 - kBufSize,
                                  buffer.get(), kBufSize, true /* truncate */));
    entry1->Close();
  }

  // ... but not bigger than that.
  {
    std::string key2("the second key");
    disk_cache::Entry* entry2 = nullptr;
    ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());

    const int kSize2 = 5 * 1024 * 1024 + 1;
    EXPECT_EQ(net::ERR_FAILED,
              WriteData(entry2, 1 /* stream */, kSize2 - kBufSize, buffer.get(),
                        kBufSize, true /* truncate */));
    entry2->Close();
  }
}
2885 
// Simple cache: run SizeChanges() once per stream, on a fresh cache each time.
TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    SizeChanges(i);
  }
}
2894 
// Simple cache: run InvalidData() once per stream, on a fresh cache each time.
TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    InvalidData(i);
  }
}
2903 
// Simple cache: run ReadWriteDestroyBuffer() on streams 1+ only.
TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
  // Proving that the test works well with optimistic operations enabled is
  // subtle, instead run only in APP_CACHE mode to disable optimistic
  // operations. Stream 0 always uses optimistic operations, so the test is not
  // run on stream 0.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  for (int i = 1; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ReadWriteDestroyBuffer(i);
  }
}
2917 
// Simple cache: run the shared DoomNormalEntry() scenario.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomNormalEntry();
}
2923 
// Simple cache: run the shared DoomEntryNextToOpenEntry() scenario.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomEntryNextToOpenEntry();
}
2929 
// Simple cache: run DoomedEntry() per stream, except the last stream.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
  SetSimpleCacheMode();
  InitCache();
  // Stream 2 is excluded because the implementation does not support writing to
  // it on a doomed entry, if it was previously lazily omitted.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount - 1; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    DoomedEntry(i);
  }
}
2940 
// Creates an entry with corrupted last byte in stream 0.
// Requires SimpleCacheMode.
// |key| names the entry; |data_size| bytes of 'A' are written to stream 1.
// Returns false if the entry or its backing file could not be set up.
bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string& key,
                                                         int data_size) {
  disk_cache::Entry* entry = nullptr;

  if (CreateEntry(key, &entry) != net::OK || !entry) {
    LOG(ERROR) << "Could not create entry";
    return false;
  }

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(data_size);
  memset(buffer->data(), 'A', data_size);

  EXPECT_EQ(data_size, WriteData(entry, 1, 0, buffer.get(), data_size, false));
  entry->Close();
  entry = nullptr;

  // Corrupt the last byte of the data.
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_WRITE | base::File::FLAG_OPEN);
  if (!entry_file0.IsValid())
    return false;

  // Offset of the last data byte inside the on-disk file: header + key +
  // data, minus the trailing byte we flip.
  int64_t file_offset =
      sizeof(disk_cache::SimpleFileHeader) + key.size() + data_size - 2;
  EXPECT_EQ(1, entry_file0.Write(file_offset, "X", 1));
  return true;
}
2972 
// A corrupted stream must be detected as a checksum mismatch when read.
TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  const int kLargeSize = 50000;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, kLargeSize));

  disk_cache::Entry* entry = nullptr;

  // Open the entry. Can't spot the checksum that quickly with it so
  // huge.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);

  EXPECT_GE(kLargeSize, entry->GetDataSize(1));
  auto read_buffer = base::MakeRefCounted<net::IOBufferWithSize>(kLargeSize);
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kLargeSize));
}
2993 
// Tests that an entry that has had an IO error occur can still be Doomed().
TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  const int kLargeSize = 50000;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, kLargeSize));

  disk_cache::Entry* entry = nullptr;

  // Open the entry, forcing an IO error.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);

  EXPECT_GE(kLargeSize, entry->GetDataSize(1));
  auto read_buffer = base::MakeRefCounted<net::IOBufferWithSize>(kLargeSize);
  // The read fails on the corrupted checksum...
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kLargeSize));
  // ...and dooming the errored entry must still be safe.
  entry->Doom();  // Should not crash.
}
3015 
TEST_F(DiskCacheEntryTest, SimpleCacheCreateAfterDiskLayerDoom) {
  // Code coverage for what happens when a queued create runs after failure
  // was noticed at SimpleSynchronousEntry layer.
  SetSimpleCacheMode();
  // Disable optimistic ops so we can block on CreateEntry and start
  // WriteData off with an empty op queue.
  SetCacheType(net::APP_CACHE);
  InitCache();

  const char key[] = "the key";
  const int kSize1 = 10;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  ASSERT_TRUE(entry != nullptr);

  // Make an empty _1 file, to cause a stream 2 write to fail.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 1));
  base::File entry_file1(entry_file1_path,
                         base::File::FLAG_WRITE | base::File::FLAG_CREATE);
  ASSERT_TRUE(entry_file1.IsValid());

  // Fire-and-forget write (null callback) that will fail at the disk layer.
  entry->WriteData(2, 0, buffer1.get(), kSize1, net::CompletionOnceCallback(),
                   /* truncate= */ true);
  entry->Close();

  // At this point we have put WriteData & Close on the queue, and WriteData
  // started, but we haven't given the event loop control so the failure
  // hasn't been reported and handled here, so the entry is still active
  // for the key. Queue up another create for same key, and run through the
  // events.
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_EQ(net::ERR_FAILED, CreateEntry(key, &entry2));
  ASSERT_TRUE(entry2 == nullptr);

  EXPECT_EQ(0, cache_->GetEntryCount());

  // Should be able to create properly next time, though.
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry3));
  ASSERT_TRUE(entry3 != nullptr);
  entry3->Close();
}
3062 
TEST_F(DiskCacheEntryTest, SimpleCacheQueuedOpenOnDoomedEntry) {
  // This tests the following sequence of ops:
  // A = Create(K);
  // Close(A);
  // B = Open(K);
  // Doom(K);
  // Close(B);
  //
  // ... where the execution of the Open sits on the queue all the way till
  // Doom. This now succeeds, as the doom is merely queued at time of Open,
  // rather than completed.

  SetSimpleCacheMode();
  // Disable optimistic ops (APP_CACHE mode) so we can block on CreateEntry
  // and start the later operations off with an empty op queue.
  SetCacheType(net::APP_CACHE);
  InitCache();

  const char key[] = "the key";

  disk_cache::Entry* entry = nullptr;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));  // event loop!
  ASSERT_TRUE(entry != nullptr);

  entry->Close();

  // Done via cache_ -> no event loop, so the Open stays queued.
  TestEntryResultCompletionCallback cb;
  EntryResult result = cache_->OpenEntry(key, net::HIGHEST, cb.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, result.net_error());

  net::TestCompletionCallback cb2;
  cache_->DoomEntry(key, net::HIGHEST, cb2.callback());
  // Now event loop.
  result = cb.WaitForResult();
  // The queued Open succeeds: the doom had not completed at Open time.
  EXPECT_EQ(net::OK, result.net_error());
  result.ReleaseEntry()->Close();

  EXPECT_EQ(net::OK, cb2.WaitForResult());
  EXPECT_EQ(0, cache_->GetEntryCount());
}
3104 
TEST_F(DiskCacheEntryTest, SimpleCacheDoomErrorRace) {
  // Code coverage for a doom racing with a doom induced by a failure.
  SetSimpleCacheMode();
  // Disable optimistic ops (APP_CACHE mode) so we can block on CreateEntry
  // and start WriteData off with an empty op queue.
  SetCacheType(net::APP_CACHE);
  InitCache();

  const char kKey[] = "the first key";
  const int kSize1 = 10;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_EQ(net::OK, CreateEntry(kKey, &entry));
  ASSERT_TRUE(entry != nullptr);

  // Now an empty _1 file, to cause a stream 2 write to fail.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(kKey, 1));
  base::File entry_file1(entry_file1_path,
                         base::File::FLAG_WRITE | base::File::FLAG_CREATE);
  ASSERT_TRUE(entry_file1.IsValid());

  // The failing write (no callback) induces an internal doom that races with
  // the explicit DoomEntry below.
  entry->WriteData(2, 0, buffer1.get(), kSize1, net::CompletionOnceCallback(),
                   /* truncate= */ true);

  net::TestCompletionCallback cb;
  cache_->DoomEntry(kKey, net::HIGHEST, cb.callback());
  entry->Close();
  EXPECT_EQ(0, cb.WaitForResult());
}
3137 
TruncatePath(const base::FilePath & file_path,int64_t length)3138 bool TruncatePath(const base::FilePath& file_path, int64_t length) {
3139   base::File file(file_path, base::File::FLAG_WRITE | base::File::FLAG_OPEN);
3140   if (!file.IsValid())
3141     return false;
3142   return file.SetLength(length);
3143 }
3144 
TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
  // Verify that an entry file too short to contain an EOF record is treated
  // as a failed open rather than corrupting state.
  SetSimpleCacheMode();
  InitCache();

  const std::string key("the first key");

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  disk_cache::Entry* null = nullptr;
  EXPECT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // Force the entry to flush to disk, so subsequent platform file operations
  // succeed.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();
  entry = nullptr;

  // Truncate the file such that the length isn't sufficient to have an EOF
  // record.
  int kTruncationBytes = -static_cast<int>(sizeof(disk_cache::SimpleFileEOF));
  const base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  const int64_t invalid_size = disk_cache::simple_util::GetFileSizeFromDataSize(
      key.size(), kTruncationBytes);
  EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
  EXPECT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
  DisableIntegrityCheck();
}
3175 
TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = nullptr;

  disk_cache::Entry* entry = nullptr;
  EXPECT_THAT(CreateEntry("my key", &entry), IsOk());
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);  // Closes |entry| at scope exit.

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  EXPECT_EQ(
      write_buffer->size(),
      WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));

  scoped_refptr<net::IOBufferWithSize> read_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  EXPECT_EQ(read_buffer->size(),
            ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
}
3202 
TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
  // Test sequence:
  // Create, Write, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = nullptr;

  MessageLoopHelper helper;
  CallbackTest create_callback(&helper, false);

  int expected_callback_runs = 0;
  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);

  disk_cache::Entry* entry = nullptr;
  EXPECT_THAT(CreateEntry("my key", &entry), IsOk());
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  // Non-optimistic write must return ERR_IO_PENDING (i.e. not block) and
  // complete via the callback.
  int ret = entry->WriteData(
      1, 0, write_buffer.get(), write_buffer->size(),
      base::BindOnce(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  ASSERT_THAT(ret, IsError(net::ERR_IO_PENDING));
  helper.WaitUntilCacheIoFinished(++expected_callback_runs);
}
3233 
TEST_F(DiskCacheEntryTest,
       SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = nullptr;
  MessageLoopHelper helper;

  disk_cache::Entry* entry = nullptr;
  // Note that |entry| is only set once CreateEntry() completed which is why we
  // have to wait (i.e. use the helper CreateEntry() function).
  EXPECT_THAT(CreateEntry("my key", &entry), IsOk());
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  int ret = entry->WriteData(
      1, 0, write_buffer.get(), write_buffer->size(),
      base::BindOnce(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  EXPECT_THAT(ret, IsError(net::ERR_IO_PENDING));
  int expected_callback_runs = 1;

  // Issue the read immediately, without waiting for the write: it must queue
  // behind the write and still observe the written data.
  scoped_refptr<net::IOBufferWithSize> read_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CallbackTest read_callback(&helper, false);
  ret = entry->ReadData(
      1, 0, read_buffer.get(), read_buffer->size(),
      base::BindOnce(&CallbackTest::Run, base::Unretained(&read_callback)));
  EXPECT_THAT(ret, IsError(net::ERR_IO_PENDING));
  ++expected_callback_runs;

  helper.WaitUntilCacheIoFinished(expected_callback_runs);
  ASSERT_EQ(read_buffer->size(), write_buffer->size());
  EXPECT_EQ(
      0,
      memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
}
3278 
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
  // Test sequence:
  // Create, Write, Read, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);

  int expected = 0;  // Number of async callback completions to wait for.
  const int kSize1 = 10;
  const int kSize2 = 20;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer1_read = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);
  auto buffer2_read = base::MakeRefCounted<net::IOBufferWithSize>(kSize2);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);

  // Create is optimistic, must return OK.
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST,
                          base::BindOnce(&CallbackTest::RunWithEntry,
                                         base::Unretained(&callback1)));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  // This write may or may not be optimistic (it depends if the previous
  // optimistic create already finished by the time we call the write here).
  int ret = entry->WriteData(
      1, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback2)), false);
  EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1_read.get(), kSize1,
                            base::BindOnce(&CallbackTest::Run,
                                           base::Unretained(&callback3))));
  expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  // At this point after waiting, the pending operations queue on the entry
  // should be empty, so the next Write operation must run as optimistic.
  EXPECT_EQ(kSize2,
            entry->WriteData(1, 0, buffer2.get(), kSize2,
                             base::BindOnce(&CallbackTest::Run,
                                            base::Unretained(&callback4)),
                             false));

  // Let's do another read so we block until both the write and the read
  // operation finishes and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer2_read.get(), kSize2,
                            base::BindOnce(&CallbackTest::Run,
                                           base::Unretained(&callback5))));
  expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
3356 
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
  // Test sequence:
  // Create, Open, Close, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);

  // Optimistic create: returns OK synchronously with the entry.
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST,
                          base::BindOnce(&CallbackTest::RunWithEntry,
                                         base::Unretained(&callback1)));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry);
  ScopedEntryPtr entry_closer(entry);

  // Open of the same key completes asynchronously and must yield the very
  // same live entry object.
  EntryResult result2 =
      cache_->OpenEntry(key, net::HIGHEST,
                        base::BindOnce(&CallbackTest::RunWithEntry,
                                       base::Unretained(&callback2)));
  ASSERT_EQ(net::ERR_IO_PENDING, result2.net_error());
  ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));
  result2 = callback2.ReleaseLastEntryResult();
  EXPECT_EQ(net::OK, result2.net_error());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  EXPECT_NE(nullptr, entry2);
  EXPECT_EQ(entry, entry2);

  // We have to call close twice, since we called create and open above.
  // (the other close is from |entry_closer|).
  entry->Close();

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
3397 
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
  // Test sequence:
  // Create, Close, Open, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  // Optimistic create with no callback.
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry);
  entry->Close();

  TestEntryResultCompletionCallback cb;
  EntryResult result2 = cache_->OpenEntry(key, net::HIGHEST, cb.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, result2.net_error());
  result2 = cb.WaitForResult();
  ASSERT_THAT(result2.net_error(), IsOk());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  ScopedEntryPtr entry_closer(entry2);

  // The Open must return the same (still alive) entry object as the Create.
  EXPECT_NE(nullptr, entry2);
  EXPECT_EQ(entry, entry2);

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
}
3427 
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
  // Test sequence:
  // Create, Close, Write, Open, Open, Close, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry);
  entry->Close();

  // Let's do a Write so we block until both the Close and the Write
  // operation finishes. Write must fail since we are writing in a closed entry.
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_THAT(cb.GetResult(net::ERR_IO_PENDING), IsError(net::ERR_FAILED));

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::RunLoop().RunUntilIdle();

  // At this point the |entry| must have been destroyed, and called
  // RemoveSelfFromBackend().
  TestEntryResultCompletionCallback cb2;
  EntryResult result2 = cache_->OpenEntry(key, net::HIGHEST, cb2.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, result2.net_error());
  result2 = cb2.WaitForResult();
  ASSERT_THAT(result2.net_error(), IsOk());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  EXPECT_NE(nullptr, entry2);

  // A second Open of the same key must return the same live entry object.
  EntryResult result3 = cache_->OpenEntry(key, net::HIGHEST, cb2.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, result3.net_error());
  result3 = cb2.WaitForResult();
  ASSERT_THAT(result3.net_error(), IsOk());
  disk_cache::Entry* entry3 = result3.ReleaseEntry();
  EXPECT_NE(nullptr, entry3);
  EXPECT_EQ(entry2, entry3);
  entry3->Close();

  // The previous Close doesn't actually close the entry since we opened it
  // twice, so the next Write operation must succeed and it must be able to
  // perform it optimistically, since there is no operation running on this
  // entry.
  EXPECT_EQ(kSize1, entry2->WriteData(1, 0, buffer1.get(), kSize1,
                                      net::CompletionOnceCallback(), false));

  // Let's do another read so we block until both the write and the read
  // operation finishes and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
  entry2->Close();
}
3495 
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic5) {
  // Test sequence:
  // Create, Doom, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry);
  ScopedEntryPtr entry_closer(entry);
  entry->Doom();

  // I/O on the doomed-but-open entry must still work.
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
3529 
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
  // Test sequence:
  // Create, Write, Doom, Doom, Read, Doom, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  auto buffer1_read = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_NE(nullptr, entry);
  ScopedEntryPtr entry_closer(entry);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Repeated dooms of the same open entry must be harmless.
  entry->Doom();
  entry->Doom();

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  entry->Doom();
}
3566 
// Confirm that IO buffers are not referenced by the Simple Cache after a write
// completes.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  // First, an optimistic create.
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);

  const int kWriteSize = 512;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kWriteSize);
  EXPECT_TRUE(buffer1->HasOneRef());
  CacheTestFillBuffer(buffer1->data(), kWriteSize, false);

  // An optimistic write happens only when there is an empty queue of pending
  // operations. To ensure the queue is empty, we issue a write and wait until
  // it completes.
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
  EXPECT_TRUE(buffer1->HasOneRef());

  // Finally, we should perform an optimistic write and confirm that all
  // references to the IO buffer have been released.
  EXPECT_EQ(kWriteSize, entry->WriteData(1, 0, buffer1.get(), kWriteSize,
                                         net::CompletionOnceCallback(), false));
  EXPECT_TRUE(buffer1->HasOneRef());
}
3601 
TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
  // Test sequence:
  // Create, Doom, Write, Close, Check files are not on disk anymore.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_NE(nullptr, entry);

  EXPECT_THAT(cache_->DoomEntry(key, net::HIGHEST, cb.callback()),
              IsError(net::ERR_IO_PENDING));
  EXPECT_THAT(cb.GetResult(net::ERR_IO_PENDING), IsOk());

  // The write on the doomed entry still succeeds synchronously (optimistic).
  EXPECT_EQ(
      kSize1,
      entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));

  entry->Close();

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::RunLoop().RunUntilIdle();

  // All of the doomed entry's backing files must be gone from disk.
  for (int i = 0; i < disk_cache::kSimpleEntryNormalFileCount; ++i) {
    base::FilePath entry_file_path = cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
    base::File::Info info;
    EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
  }
}
3641 
TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
  // This test runs as APP_CACHE to make operations more synchronous. Test
  // sequence:
  // Create, Doom, Create.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  TestEntryResultCompletionCallback create_callback;

  EntryResult result1 = create_callback.GetResult(
      cache_->CreateEntry(key, net::HIGHEST, create_callback.callback()));
  ASSERT_EQ(net::OK, result1.net_error());
  disk_cache::Entry* entry1 = result1.ReleaseEntry();
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(nullptr, entry1);

  net::TestCompletionCallback doom_callback;
  EXPECT_EQ(net::ERR_IO_PENDING,
            cache_->DoomEntry(key, net::HIGHEST, doom_callback.callback()));

  // Re-creating the same key while its doom is still pending must succeed.
  EntryResult result2 = create_callback.GetResult(
      cache_->CreateEntry(key, net::HIGHEST, create_callback.callback()));
  ASSERT_EQ(net::OK, result2.net_error());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_THAT(doom_callback.GetResult(net::ERR_IO_PENDING), IsOk());
}
3671 
TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateOptimistic) {
  // Test that we optimize the doom -> create sequence when optimistic ops
  // are on.
  SetSimpleCacheMode();
  InitCache();
  const char kKey[] = "the key";

  // Create entry and initiate its Doom.
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
  ASSERT_TRUE(entry1 != nullptr);

  net::TestCompletionCallback doom_callback;
  cache_->DoomEntry(kKey, net::HIGHEST, doom_callback.callback());

  TestEntryResultCompletionCallback create_callback;
  // Create entry2, with same key. With optimistic ops, this should succeed
  // immediately, hence us using cache_->CreateEntry directly rather than using
  // the DiskCacheTestWithCache::CreateEntry wrapper which blocks when needed.
  EntryResult result2 =
      cache_->CreateEntry(kKey, net::HIGHEST, create_callback.callback());
  ASSERT_EQ(net::OK, result2.net_error());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  ASSERT_NE(nullptr, entry2);

  // Do some I/O to make sure it's alive.
  const int kSize = 2048;
  auto buf_1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buf_2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  EXPECT_EQ(kSize, WriteData(entry2, /* index = */ 1, /* offset = */ 0,
                             buf_1.get(), kSize, /* truncate = */ false));
  EXPECT_EQ(kSize, ReadData(entry2, /* index = */ 1, /* offset = */ 0,
                            buf_2.get(), kSize));

  doom_callback.WaitForResult();

  entry1->Close();
  entry2->Close();
}
3713 
TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateOptimisticMassDoom) {
  // Test that shows that a certain DCHECK in mass doom code had to be removed
  // once optimistic doom -> create was added.
  SetSimpleCacheMode();
  InitCache();
  const char kKey[] = "the key";

  // Create entry and initiate its Doom.
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
  ASSERT_TRUE(entry1 != nullptr);

  net::TestCompletionCallback doom_callback;
  cache_->DoomEntry(kKey, net::HIGHEST, doom_callback.callback());

  TestEntryResultCompletionCallback create_callback;
  // Create entry2, with same key. With optimistic ops, this should succeed
  // immediately, hence us using cache_->CreateEntry directly rather than using
  // the DiskCacheTestWithCache::CreateEntry wrapper which blocks when needed.
  EntryResult result =
      cache_->CreateEntry(kKey, net::HIGHEST, create_callback.callback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry2 = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry2);

  net::TestCompletionCallback doomall_callback;

  // This is what had code that had a no-longer valid DCHECK.
  cache_->DoomAllEntries(doomall_callback.callback());

  doom_callback.WaitForResult();
  doomall_callback.WaitForResult();

  entry1->Close();
  entry2->Close();
}
3750 
TEST_F(DiskCacheEntryTest, SimpleCacheDoomOpenOptimistic) {
  // Test that we optimize the doom -> open sequence when optimistic ops
  // are on.
  SetSimpleCacheMode();
  InitCache();
  const char kKey[] = "the key";

  // Create entry and initiate its Doom.
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
  ASSERT_TRUE(entry1 != nullptr);
  entry1->Close();

  net::TestCompletionCallback doom_callback;
  cache_->DoomEntry(kKey, net::HIGHEST, doom_callback.callback());

  // Try to open entry. This should detect a miss immediately, since it's
  // the only thing after a doom.

  EntryResult result2 =
      cache_->OpenEntry(kKey, net::HIGHEST, EntryResultCallback());
  EXPECT_EQ(net::ERR_FAILED, result2.net_error());
  EXPECT_EQ(nullptr, result2.ReleaseEntry());
  doom_callback.WaitForResult();
}
3776 
TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
  // Test sequence:
  // Create, Doom, Create, Doom (1st entry), Open.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;

  const char key[] = "the first key";

  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);

  EXPECT_THAT(DoomEntry(key), IsOk());

  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry2), IsOk());
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);

  // Redundantly dooming entry1 should not delete entry2.
  disk_cache::SimpleEntryImpl* simple_entry1 =
      static_cast<disk_cache::SimpleEntryImpl*>(entry1);
  net::TestCompletionCallback cb;
  EXPECT_EQ(net::OK,
            cb.GetResult(simple_entry1->DoomEntry(cb.callback())));

  // entry2 must still be openable after the redundant doom of entry1.
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry3), IsOk());
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);
}
3810 
TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateDoom) {
  // Test sequence:
  // Create, Doom, Create, Doom.
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  // Create the first entry for |key| and doom it while it is still open.
  disk_cache::Entry* first_entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &first_entry), IsOk());
  ScopedEntryPtr first_closer(first_entry);
  EXPECT_NE(nullptr, first_entry);

  first_entry->Doom();

  // Re-create an entry under the same key, then doom that one as well.
  disk_cache::Entry* second_entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &second_entry), IsOk());
  ScopedEntryPtr second_closer(second_entry);
  EXPECT_NE(nullptr, second_entry);

  second_entry->Doom();

  // This test passes if it doesn't crash.
}
3837 
TEST_F(DiskCacheEntryTest, SimpleCacheDoomCloseCreateCloseOpen) {
  // Test sequence: Create, Doom, Close, Create, Close, Open.
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "this is a key";

  // Create an entry, doom it, and close it.
  disk_cache::Entry* doomed_entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &doomed_entry), IsOk());
  ScopedEntryPtr doomed_closer(doomed_entry);
  EXPECT_NE(nullptr, doomed_entry);

  doomed_entry->Doom();
  doomed_closer.reset();

  // Create a replacement entry under the same key and close it immediately.
  disk_cache::Entry* fresh_entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &fresh_entry), IsOk());
  ScopedEntryPtr fresh_closer(fresh_entry);
  EXPECT_NE(nullptr, fresh_entry);

  fresh_closer.reset();

  // The replacement entry, not the doomed one, should now be openable.
  disk_cache::Entry* reopened_entry = nullptr;
  ASSERT_THAT(OpenEntry(key, &reopened_entry), IsOk());
  ScopedEntryPtr reopened_closer(reopened_entry);
  EXPECT_NE(nullptr, reopened_entry);
}
3869 
// Checks that an optimistic Create would fail later on a racing Open.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
  SetSimpleCacheMode();
  InitCache();

  // Create a corrupt file in place of a future entry. Optimistic create should
  // initially succeed, but realize later that creation failed.
  const std::string key = "the key";
  disk_cache::Entry* entry = nullptr;
  disk_cache::Entry* entry2 = nullptr;

  EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
      key, cache_path_));
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  EXPECT_THAT(result.net_error(), IsOk());
  entry = result.ReleaseEntry();
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);
  // The optimistic create reported OK above, but the backing file is corrupt,
  // so a subsequent Open of the same key must fail.
  ASSERT_NE(net::OK, OpenEntry(key, &entry2));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());

  DisableIntegrityCheck();
}
3897 
// Tests that old entries are evicted while new entries remain in the index.
// This test relies on non-mandatory properties of the simple Cache Backend:
// LRU eviction, specific values of high-watermark and low-watermark etc.
// When changing the eviction algorithm, the test will have to be re-engineered.
TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
  const int kMaxSize = 200 * 1024;
  const int kWriteSize = kMaxSize / 10;
  const int kNumExtraEntries = 12;
  SetSimpleCacheMode();
  SetMaxSize(kMaxSize);
  InitCache();

  // Write the entry that is expected to be evicted, then let its timestamp
  // age with AddDelay() before the filler entries arrive.
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key1, &entry), IsOk());
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  entry->Close();
  AddDelay();

  // Overfill the cache (12 * kMaxSize/10 > kMaxSize) to trigger eviction.
  std::string key2("the key prefix");
  for (int i = 0; i < kNumExtraEntries; i++) {
    if (i == kNumExtraEntries - 2) {
      // Create a distinct timestamp for the last two entries. These entries
      // will be checked for outliving the eviction.
      AddDelay();
    }
    ASSERT_THAT(CreateEntry(key2 + base::NumberToString(i), &entry), IsOk());
    ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
  // the internal knowledge about |SimpleBackendImpl|.
  ASSERT_NE(net::OK, OpenEntry(key1, &entry))
      << "Should have evicted the old entry";
  for (int i = 0; i < 2; i++) {
    int entry_no = kNumExtraEntries - i - 1;
    // Generally there is no guarantee that at this point the background
    // eviction is finished. We are testing the positive case, i.e. when the
    // eviction never reaches this entry, should be non-flaky.
    ASSERT_EQ(net::OK, OpenEntry(key2 + base::NumberToString(entry_no), &entry))
        << "Should not have evicted fresh entry " << entry_no;
    entry->Close();
  }
}
3947 
// Tests that if a read and a following in-flight truncate are both in progress
// simultaneously that they both can occur successfully. See
// http://crbug.com/239223
TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  // We use a very large entry size here to make sure this doesn't hit
  // the prefetch path for any conceivable setting. Hitting prefetch would
  // make us serve the read below from memory entirely on I/O thread, missing
  // the point of the test which covered two concurrent disk ops, with
  // portions of work happening on the workpool.
  const int kBufferSize = 50000;
  auto write_buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  EXPECT_EQ(kBufferSize,
            WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
  entry->Close();
  entry = nullptr;

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);

  MessageLoopHelper helper;
  int expected = 0;

  // Make a short read.
  const int kReadBufferSize = 512;
  auto read_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kReadBufferSize);
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, read_buffer.get(), kReadBufferSize,
                            base::BindOnce(&CallbackTest::Run,
                                           base::Unretained(&read_callback))));
  ++expected;

  // Truncate the entry to the length of that read, while the read is still
  // pending.
  auto truncate_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kReadBufferSize);
  CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
  CallbackTest truncate_callback(&helper, false);
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, truncate_buffer.get(), kReadBufferSize,
                       base::BindOnce(&CallbackTest::Run,
                                      base::Unretained(&truncate_callback)),
                       true));
  ++expected;

  // Wait for both the read and truncation to finish, and confirm that both
  // succeeded.
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kReadBufferSize, read_callback.last_result());
  EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
  // The read must observe the pre-truncate contents.
  EXPECT_EQ(0,
            memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
}
4012 
// Tests that if a write and a read dependent on it are both in flight
// simultaneously that they both can complete successfully without erroneous
// early returns. See http://crbug.com/239223
TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 1024;
  auto write_buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  MessageLoopHelper helper;
  int expected = 0;

  // Issue the write, then the read before the write has completed.
  CallbackTest write_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1, 0, write_buffer.get(), kBufferSize,
                             base::BindOnce(&CallbackTest::Run,
                                            base::Unretained(&write_callback)),
                             true));
  ++expected;

  auto read_buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, read_buffer.get(), kBufferSize,
                            base::BindOnce(&CallbackTest::Run,
                                           base::Unretained(&read_callback))));
  ++expected;

  // Both operations should complete, and the read should see the write's data.
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kBufferSize, write_callback.last_result());
  EXPECT_EQ(kBufferSize, read_callback.last_result());
  EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
}
4055 
TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
  SetSimpleCacheMode();
  DisableSimpleCacheWaitForIndex();
  DisableIntegrityCheck();
  InitCache();

  // Assume the index is not initialized, which is likely, since we are blocking
  // the IO thread from executing the index finalization step.
  TestEntryResultCompletionCallback cb1;
  TestEntryResultCompletionCallback cb2;
  EntryResult rv1 = cache_->OpenEntry("key", net::HIGHEST, cb1.callback());
  EntryResult rv2 = cache_->CreateEntry("key", net::HIGHEST, cb2.callback());

  // The Open must miss (nothing existed yet), while the Create succeeds.
  rv1 = cb1.GetResult(std::move(rv1));
  EXPECT_THAT(rv1.net_error(), IsError(net::ERR_FAILED));
  rv2 = cb2.GetResult(std::move(rv2));
  ASSERT_THAT(rv2.net_error(), IsOk());
  disk_cache::Entry* entry2 = rv2.ReleaseEntry();

  // Try to get an alias for entry2. Open should succeed, and return the same
  // pointer.
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_EQ(net::OK, OpenEntry("key", &entry3));
  EXPECT_EQ(entry3, entry2);

  entry2->Close();
  entry3->Close();
}
4084 
// Checking one more scenario of overlapped reading of a bad entry.
// Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
// last two reads.
TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "key";
  int size = 50000;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, size));

  auto read_buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(size);
  auto read_buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(size);

  // Advance the first reader a little.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);
  EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));

  // Advance the 2nd reader by the same amount.
  disk_cache::Entry* entry2 = nullptr;
  EXPECT_THAT(OpenEntry(key, &entry2), IsOk());
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));

  // Continue reading 1st; reading to the end should hit the bad checksum.
  EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));

  // This read should fail as well because we have previous read failures.
  EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
  DisableIntegrityCheck();
}
4118 
// Test if we can sequentially read each subset of the data until all the data
// is read, then the CRC is calculated correctly and the reads are successful.
TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
  // Test sequence:
  // Create, Write, Read (first half of data), Read (second half of data),
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_NE(null, entry);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  // Re-opening the same key should hand back the very same (still-alive)
  // entry object.
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry2), IsOk());
  EXPECT_EQ(entry, entry2);

  // Read the first half of the data.
  int offset = 0;
  int buf_len = kHalfSize;
  auto buffer1_read1 = base::MakeRefCounted<net::IOBufferWithSize>(buf_len);
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));

  // Read the second half of the data.
  offset = buf_len;
  buf_len = kHalfSize;
  auto buffer1_read2 = base::MakeRefCounted<net::IOBufferWithSize>(buf_len);
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
  char* buffer1_data = buffer1->data() + offset;
  EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry->Close();
  entry = nullptr;
}
4168 
// Test if we can write the data not in sequence and read correctly. In
// this case the CRC will not be present.
TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
  // Test sequence:
  // Create, Write (second half of data), Write (first half of data), Read,
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  // buffer2 starts with a copy of buffer1's second half, so writing buffer2
  // at offset kHalfSize reproduces buffer1's tail.
  char* buffer1_data = buffer1->data() + kHalfSize;
  memcpy(buffer2->data(), buffer1_data, kHalfSize);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();
  // Repeat the out-of-order write for every stream.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    EXPECT_NE(null, entry);

    int offset = kHalfSize;
    int buf_len = kHalfSize;

    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    offset = 0;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    entry->Close();

    ASSERT_THAT(OpenEntry(key, &entry), IsOk());

    auto buffer1_read1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
    // Check that we are not leaking.
    ASSERT_NE(entry, null);
    EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
    entry->Close();
  }
}
4217 
// Test that changing stream1 size does not affect stream0 (stream0 and stream1
// are stored in the same file in Simple Cache).
TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry = nullptr;
  const std::string key("the key");
  const int kSize = 100;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer_read = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(entry);

  // Write something into stream0.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();

  // Extend stream1 (a zero-length write at |stream1_size| grows the stream).
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  int stream1_size = 100;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();

  // Check that stream0 data has not been modified and that the EOF record for
  // stream 0 contains a crc.
  // The entry needs to be reopened before checking the crc: Open will perform
  // the synchronization with the previous Close. This ensures the EOF records
  // have been written to disk before we attempt to read them independently.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_READ | base::File::FLAG_OPEN);
  ASSERT_TRUE(entry_file0.IsValid());

  // Compute where stream 0's EOF record lives in the file and read it raw.
  int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
  int sparse_data_size = 0;
  disk_cache::SimpleEntryStat entry_stat(
      base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
  int eof_offset = entry_stat.GetEOFOffsetInFile(key.size(), 0);
  disk_cache::SimpleFileEOF eof_record;
  ASSERT_EQ(static_cast<int>(sizeof(eof_record)),
            entry_file0.Read(eof_offset, reinterpret_cast<char*>(&eof_record),
                             sizeof(eof_record)));
  EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
  EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
              disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);

  buffer_read = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));

  // Shrink stream1 (zero-length truncating write at the smaller size).
  stream1_size = 50;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();

  // Check that stream0 data has not been modified.
  buffer_read = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();
  entry = nullptr;
}
4289 
// Test that writing within the range for which the crc has already been
// computed will properly invalidate the computed crc.
TEST_F(DiskCacheEntryTest, SimpleCacheCRCRewrite) {
  // Test sequence:
  // Create, Write (big data), Write (small data in the middle), Close.
  // Open, Read (all), Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kHalfSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kHalfSize, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_NE(null, entry);
  entry->Close();

  // Exercise each stream: full write followed by an overlapping rewrite of
  // the second half, which must invalidate the CRC computed for the first
  // write.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    int offset = 0;
    int buf_len = kSize;

    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    offset = kHalfSize;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    ASSERT_THAT(OpenEntry(key, &entry), IsOk());

    // The read-back should be buffer1's first half followed by buffer2.
    auto buffer1_read1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
    EXPECT_EQ(
        0,
        memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize, kHalfSize));

    entry->Close();
  }
}
4338 
SimpleCacheThirdStreamFileExists(const char * key)4339 bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key) {
4340   int third_stream_file_index =
4341       disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
4342   base::FilePath third_stream_file_path = cache_path_.AppendASCII(
4343       disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
4344           key, third_stream_file_index));
4345   return PathExists(third_stream_file_path);
4346 }
4347 
SyncDoomEntry(const char * key)4348 void DiskCacheEntryTest::SyncDoomEntry(const char* key) {
4349   net::TestCompletionCallback callback;
4350   cache_->DoomEntry(key, net::HIGHEST, callback.callback());
4351   callback.WaitForResult();
4352 }
4353 
CreateEntryWithHeaderBodyAndSideData(const std::string & key,int data_size)4354 void DiskCacheEntryTest::CreateEntryWithHeaderBodyAndSideData(
4355     const std::string& key,
4356     int data_size) {
4357   // Use one buffer for simplicity.
4358   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(data_size);
4359   CacheTestFillBuffer(buffer->data(), data_size, false);
4360 
4361   disk_cache::Entry* entry = nullptr;
4362   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
4363   for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
4364     EXPECT_EQ(data_size, WriteData(entry, i, /* offset */ 0, buffer.get(),
4365                                    data_size, false));
4366   }
4367   entry->Close();
4368 }
4369 
TruncateFileFromEnd(int file_index,const std::string & key,int data_size,int truncate_size)4370 void DiskCacheEntryTest::TruncateFileFromEnd(int file_index,
4371                                              const std::string& key,
4372                                              int data_size,
4373                                              int truncate_size) {
4374   // Remove last eof bytes from cache file.
4375   ASSERT_GT(data_size, truncate_size);
4376   const int64_t new_size =
4377       disk_cache::simple_util::GetFileSizeFromDataSize(key.size(), data_size) -
4378       truncate_size;
4379   const base::FilePath entry_path = cache_path_.AppendASCII(
4380       disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, file_index));
4381   EXPECT_TRUE(TruncatePath(entry_path, new_size));
4382 }
4383 
// Creates an entry, destroys the backend, and then exercises the orphaned
// entry. Operations are allowed to fail, but must not crash.
void DiskCacheEntryTest::UseAfterBackendDestruction() {
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  ResetCaches();

  const int kSize = 100;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  // Do some writes and reads, but don't change the result. We're OK
  // with them failing, just not them crashing.
  WriteData(entry, 1, 0, buffer.get(), kSize, false);
  ReadData(entry, 1, 0, buffer.get(), kSize);
  WriteSparseData(entry, 20000, buffer.get(), kSize);

  entry->Close();
}
4401 
// Writes sparse data to an entry, destroys the backend, then closes the
// entry; the close after backend destruction must not DCHECK or crash.
void DiskCacheEntryTest::CloseSparseAfterBackendDestruction() {
  const int kSize = 100;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  WriteSparseData(entry, 20000, buffer.get(), kSize);

  ResetCaches();

  // This call shouldn't DCHECK or crash.
  entry->Close();
}
4416 
4417 // Check that a newly-created entry with no third-stream writes omits the
4418 // third stream file.
TEST_F(DiskCacheEntryTest,SimpleCacheOmittedThirdStream1)4419 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
4420   SetSimpleCacheMode();
4421   InitCache();
4422 
4423   const char key[] = "key";
4424 
4425   disk_cache::Entry* entry;
4426 
4427   // Create entry and close without writing: third stream file should be
4428   // omitted, since the stream is empty.
4429   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
4430   entry->Close();
4431   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
4432 
4433   SyncDoomEntry(key);
4434   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
4435 }
4436 
// Check that a newly-created entry with only a single zero-offset, zero-length
// write omits the third stream file.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, write empty buffer to third stream, and close: third stream
  // should still be omitted, since the entry ignores writes that don't modify
  // data or change the length.
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer.get(), 0, true));
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));

  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
4462 
// Check that we can read back data written to the third stream.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, write data to third stream, and close: third stream should
  // not be omitted, since it contains data.  Re-open entry and ensure there
  // are that many bytes in the third stream.
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  // Dooming the entry removes the third stream file along with the rest.
  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
4494 
// Check that we remove the third stream file upon opening an entry and finding
// the third stream empty.  (This is the upgrade path for entries written
// before the third stream was optional.)
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, write data to third stream, truncate third stream back to
  // empty, and close: third stream will not initially be omitted, since entry
  // creates the file when the first significant write comes in, and only
  // removes it on open if it is empty.  Reopen, ensure that the file is
  // deleted, and that there's no data in the third stream.
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1.get(), 0, true));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));

  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
4531 
4532 // Check that we don't accidentally create the third stream file once the entry
4533 // has been doomed.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, doom entry, write data to third stream, and close: third
  // stream should not exist.  (Note: We don't care if the write fails, just
  // that it doesn't cause the file to be created on disk.)
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Doom();
  WriteData(entry, 2, 0, buffer.get(), kHalfSize, true);
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
4555 
4556 // There could be a race between Doom and an optimistic write.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
  // Test sequence:
  // Create, first Write, second Write, Close.
  // Open, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";

  const int kSize = 200;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);

  // The race only happens on stream 1 and stream 2.
  // NOTE(review): despite the comment above, the loop starts at 0 and so also
  // covers stream 0 — harmless extra coverage, but confirm which is intended.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_THAT(DoomAllEntries(), IsOk());
    disk_cache::Entry* entry = nullptr;

    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    EXPECT_NE(null, entry);
    entry->Close();
    entry = nullptr;

    // Re-create after doom so the create below is the optimistic operation.
    ASSERT_THAT(DoomAllEntries(), IsOk());
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    EXPECT_NE(null, entry);

    int offset = 0;
    int buf_len = kSize;
    // This write should not be optimistic (since create is).
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));

    offset = kSize;
    // This write should be optimistic.
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    // The entry must still be openable after the doom/write interleaving.
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    EXPECT_NE(null, entry);

    entry->Close();
    entry = nullptr;
  }
}
4605 
4606 // Tests for a regression in crbug.com/317138 , in which deleting an already
4607 // doomed entry was removing the active entry from the index.
TEST_F(DiskCacheEntryTest, SimpleCachePreserveActiveEntries) {
  // Regression test for crbug.com/317138: deleting an already-doomed entry
  // must not remove the distinct active entry (same key) from the index.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* null = nullptr;

  const char key[] = "this is a key";

  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);
  entry1->Doom();

  // While the doomed |entry1| is still alive, create a new active entry
  // under the same key.
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry2), IsOk());
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);
  entry2_closer.reset();

  // Closing then reopening entry2 insures that entry2 is serialized, and so
  // it can be opened from files without error.
  entry2 = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry2), IsOk());
  EXPECT_NE(null, entry2);
  entry2_closer.reset(entry2);

  scoped_refptr<disk_cache::SimpleEntryImpl>
      entry1_refptr = static_cast<disk_cache::SimpleEntryImpl*>(entry1);

  // If crbug.com/317138 has regressed, this will remove |entry2| from
  // the backend's |active_entries_| while |entry2| is still alive and its
  // files are still on disk.
  entry1_closer.reset();
  entry1 = nullptr;

  // Close does not have a callback. However, we need to be sure the close is
  // finished before we continue the test. We can take advantage of how the ref
  // counting of a SimpleEntryImpl works to fake out a callback: When the
  // last Close() call is made to an entry, an IO operation is sent to the
  // synchronous entry to close the platform files. This IO operation holds a
  // ref pointer to the entry, which expires when the operation is done. So,
  // we take a refpointer, and watch the SimpleEntry object until it has only
  // one ref; this indicates the IO operation is complete.
  while (!entry1_refptr->HasOneRef()) {
    base::PlatformThread::YieldCurrentThread();
    base::RunLoop().RunUntilIdle();
  }
  entry1_refptr = nullptr;

  // In the bug case, this new entry ends up being a duplicate object pointing
  // at the same underlying files.
  disk_cache::Entry* entry3 = nullptr;
  EXPECT_THAT(OpenEntry(key, &entry3), IsOk());
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);

  // The test passes if these two dooms do not crash.
  entry2->Doom();
  entry3->Doom();
}
4669 
// Runs the shared sparse-I/O round-trip body against the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  BasicSparseIO();
}
4675 
// Runs the shared large sparse-I/O body against the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  HugeSparseIO();
}
4681 
// Runs the shared GetAvailableRange body against the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
  SetSimpleCacheMode();
  InitCache();
  GetAvailableRangeTest();
}
4687 
// Runs the shared sparse-entry update body against the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  UpdateSparseEntry();
}
4693 
// Runs the shared sparse-entry doom body against the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomSparseEntry();
}
4699 
// Runs the shared partial sparse-entry body against the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  PartialSparseEntry();
}
4705 
TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
  const int kSize = 1024;

  SetSimpleCacheMode();
  // An entry is allowed sparse data 1/10 the size of the cache, so this size
  // allows for one |kSize|-sized range plus overhead, but not two ranges.
  SetMaxSize(kSize * 15);
  InitCache();

  const char key[] = "key";
  disk_cache::Entry* null = nullptr;
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_NE(null, entry);

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  net::TestCompletionCallback callback;
  int ret;

  // Verify initial conditions.
  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  // Write a range and make sure it reads back.
  ret = entry->WriteSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  // Write another range and make sure it reads back.
  ret = entry->WriteSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  // Make sure the first range was removed when the second was written,
  // since the budget set above cannot hold both.
  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  // Close and reopen the entry and make sure the first entry is still absent
  // and the second entry is still present.
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  entry->Close();
}
4764 
// A stream-0/1 file whose trailing EOF record is cut off must be treated as
// corrupt, failing the open.
TEST_F(DiskCacheEntryTest, SimpleCacheNoBodyEOF) {
  SetSimpleCacheMode();
  InitCache();

  const std::string key("the first key");
  const int kSize = 1024;
  CreateEntryWithHeaderBodyAndSideData(key, kSize);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();

  // Chop the SimpleFileEOF record off the end of the header+body file.
  TruncateFileFromEnd(0 /*header and body file index*/, key, kSize,
                      static_cast<int>(sizeof(disk_cache::SimpleFileEOF)));
  EXPECT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
}
4781 
// A truncated side-data (stream 2) file is recoverable: the corrupt stream is
// dropped while streams 0 and 1 stay intact and the open succeeds.
TEST_F(DiskCacheEntryTest, SimpleCacheNoSideDataEOF) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  const int kSize = 1024;
  CreateEntryWithHeaderBodyAndSideData(key, kSize);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();

  // Chop the SimpleFileEOF record off the end of the side-data file.
  TruncateFileFromEnd(1 /*side data file_index*/, key, kSize,
                      static_cast<int>(sizeof(disk_cache::SimpleFileEOF)));
  EXPECT_THAT(OpenEntry(key, &entry), IsOk());
  // The corrupted stream should have been deleted.
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  // _0 should still exist.
  base::FilePath path_0 = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(path_0));

  auto check_stream_data = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, check_stream_data.get(), kSize));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, check_stream_data.get(), kSize));
  EXPECT_EQ(0, entry->GetDataSize(2));
  entry->Close();
}
4810 
// Entries written before the key-SHA256 footer existed must still open and
// read back correctly once the footer is stripped from the on-disk file.
TEST_F(DiskCacheEntryTest, SimpleCacheReadWithoutKeySHA256) {
  // This test runs as APP_CACHE to make operations more synchronous.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const std::string stream_0_data = "data for stream zero";
  auto stream_0_iobuffer =
      base::MakeRefCounted<net::StringIOBuffer>(stream_0_data);
  EXPECT_EQ(static_cast<int>(stream_0_data.size()),
            WriteData(entry, 0, 0, stream_0_iobuffer.get(),
                      stream_0_data.size(), false));
  const std::string stream_1_data = "FOR STREAM ONE, QUITE DIFFERENT THINGS";
  auto stream_1_iobuffer =
      base::MakeRefCounted<net::StringIOBuffer>(stream_1_data);
  EXPECT_EQ(static_cast<int>(stream_1_data.size()),
            WriteData(entry, 1, 0, stream_1_iobuffer.get(),
                      stream_1_data.size(), false));
  entry->Close();

  // Drain both the I/O thread and the cache thread so the entry's files are
  // fully written and closed before they are rewritten below.
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Rewrite the file as an old-format entry lacking the key SHA256.
  EXPECT_TRUE(
      disk_cache::simple_util::RemoveKeySHA256FromEntry(key, cache_path_));
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);

  EXPECT_EQ(static_cast<int>(stream_0_data.size()), entry->GetDataSize(0));
  auto check_stream_0_data =
      base::MakeRefCounted<net::IOBufferWithSize>(stream_0_data.size());
  EXPECT_EQ(
      static_cast<int>(stream_0_data.size()),
      ReadData(entry, 0, 0, check_stream_0_data.get(), stream_0_data.size()));
  EXPECT_EQ(0, stream_0_data.compare(0, std::string::npos,
                                     check_stream_0_data->data(),
                                     stream_0_data.size()));

  EXPECT_EQ(static_cast<int>(stream_1_data.size()), entry->GetDataSize(1));
  auto check_stream_1_data =
      base::MakeRefCounted<net::IOBufferWithSize>(stream_1_data.size());
  EXPECT_EQ(
      static_cast<int>(stream_1_data.size()),
      ReadData(entry, 1, 0, check_stream_1_data.get(), stream_1_data.size()));
  EXPECT_EQ(0, stream_1_data.compare(0, std::string::npos,
                                     check_stream_1_data->data(),
                                     stream_1_data.size()));
}
4863 
// Opening an entry without a key SHA256 footer twice in a row must work; the
// first open may rewrite the entry, and the result must stay openable.
TEST_F(DiskCacheEntryTest, SimpleCacheDoubleOpenWithoutKeySHA256) {
  // This test runs as APP_CACHE to make operations more synchronous.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();

  // Drain pending work so the files are closed before being rewritten.
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  EXPECT_TRUE(
      disk_cache::simple_util::RemoveKeySHA256FromEntry(key, cache_path_));
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();

  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Second open after the first open/close cycle.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();
}
4890 
// A present-but-corrupt key SHA256 footer must cause the open to fail.
TEST_F(DiskCacheEntryTest, SimpleCacheReadCorruptKeySHA256) {
  // This test runs as APP_CACHE to make operations more synchronous.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();

  // Drain pending work so the files are closed before being corrupted.
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  EXPECT_TRUE(
      disk_cache::simple_util::CorruptKeySHA256FromEntry(key, cache_path_));
  EXPECT_NE(net::OK, OpenEntry(key, &entry));
}
4909 
// A corrupted stream-0 length in the EOF record must cause the open to fail.
TEST_F(DiskCacheEntryTest, SimpleCacheReadCorruptLength) {
  // This test runs as APP_CACHE to make operations more synchronous.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  // Use the IsOk() matcher for consistency with the surrounding tests.
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();

  // Drain pending work so the files are closed before being corrupted.
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  EXPECT_TRUE(
      disk_cache::simple_util::CorruptStream0LengthFromEntry(key, cache_path_));
  EXPECT_NE(net::OK, OpenEntry(key, &entry));
}
4927 
TEST_F(DiskCacheEntryTest, SimpleCacheCreateRecoverFromRmdir) {
  // This test runs as APP_CACHE to make operations more synchronous.
  // (in particular we want to see if create succeeded or not, so we don't
  //  want an optimistic one).
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();

  // Pretend someone deleted the cache dir. This shouldn't be too scary in
  // the test since cache_path_ is set as:
  //   CHECK(temp_dir_.CreateUniqueTempDir());
  //   cache_path_ = temp_dir_.GetPath().AppendASCII("cache");
  disk_cache::DeleteCache(cache_path_,
                          true /* delete the dir, what we really want*/);

  // Creating an entry should transparently recreate the cache directory.
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();
}
4948 
TEST_F(DiskCacheEntryTest,SimpleCacheSparseErrorHandling)4949 TEST_F(DiskCacheEntryTest, SimpleCacheSparseErrorHandling) {
4950   // If there is corruption in sparse file, we should delete all the files
4951   // before returning the failure. Further additional sparse operations in
4952   // failure state should fail gracefully.
4953   SetSimpleCacheMode();
4954   InitCache();
4955 
4956   std::string key("a key");
4957 
4958   disk_cache::SimpleFileTracker::EntryFileKey num_key(
4959       disk_cache::simple_util::GetEntryHashKey(key));
4960   base::FilePath path_0 = cache_path_.AppendASCII(
4961       disk_cache::simple_util::GetFilenameFromEntryFileKeyAndFileIndex(num_key,
4962                                                                        0));
4963   base::FilePath path_s = cache_path_.AppendASCII(
4964       disk_cache::simple_util::GetSparseFilenameFromEntryFileKey(num_key));
4965 
4966   disk_cache::Entry* entry = nullptr;
4967   ASSERT_THAT(CreateEntry(key, &entry), IsOk());
4968 
4969   const int kSize = 1024;
4970   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
4971   CacheTestFillBuffer(buffer->data(), kSize, false);
4972 
4973   EXPECT_EQ(kSize, WriteSparseData(entry, 0, buffer.get(), kSize));
4974   entry->Close();
4975 
4976   disk_cache::FlushCacheThreadForTesting();
4977   EXPECT_TRUE(base::PathExists(path_0));
4978   EXPECT_TRUE(base::PathExists(path_s));
4979 
4980   // Now corrupt the _s file in a way that makes it look OK on open, but not on
4981   // read.
4982   base::File file_s(path_s, base::File::FLAG_OPEN | base::File::FLAG_READ |
4983                                 base::File::FLAG_WRITE);
4984   ASSERT_TRUE(file_s.IsValid());
4985   file_s.SetLength(sizeof(disk_cache::SimpleFileHeader) +
4986                    sizeof(disk_cache::SimpleFileSparseRangeHeader) +
4987                    key.size());
4988   file_s.Close();
4989 
4990   // Re-open, it should still be fine.
4991   ASSERT_THAT(OpenEntry(key, &entry), IsOk());
4992 
4993   // Read should fail though.
4994   EXPECT_EQ(net::ERR_CACHE_READ_FAILURE,
4995             ReadSparseData(entry, 0, buffer.get(), kSize));
4996 
4997   // At the point read returns to us, the files should already been gone.
4998   EXPECT_FALSE(base::PathExists(path_0));
4999   EXPECT_FALSE(base::PathExists(path_s));
5000 
5001   // Re-trying should still fail. Not DCHECK-fail.
5002   EXPECT_EQ(net::ERR_FAILED, ReadSparseData(entry, 0, buffer.get(), kSize));
5003 
5004   // Similarly for other ops.
5005   EXPECT_EQ(net::ERR_FAILED, WriteSparseData(entry, 0, buffer.get(), kSize));
5006   net::TestCompletionCallback cb;
5007 
5008   TestRangeResultCompletionCallback range_cb;
5009   RangeResult result = range_cb.GetResult(
5010       entry->GetAvailableRange(0, 1024, range_cb.callback()));
5011   EXPECT_EQ(net::ERR_FAILED, result.net_error);
5012 
5013   entry->Close();
5014   disk_cache::FlushCacheThreadForTesting();
5015 
5016   // Closing shouldn't resurrect files, either.
5017   EXPECT_FALSE(base::PathExists(path_0));
5018   EXPECT_FALSE(base::PathExists(path_s));
5019 }
5020 
TEST_F(DiskCacheEntryTest, SimpleCacheCreateCollision) {
  // These two keys collide; this test is that we properly handled creation
  // of both.
  const char kCollKey1[] =
      "\xfb\x4e\x9c\x1d\x66\x71\xf7\x54\xa3\x11\xa0\x7e\x16\xa5\x68\xf6";
  const char kCollKey2[] =
      "\xbc\x60\x64\x92\xbc\xa0\x5c\x15\x17\x93\x29\x2d\xe4\x21\xbd\x03";

  const int kSize = 256;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  auto read_buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);

  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry1;
  ASSERT_THAT(CreateEntry(kCollKey1, &entry1), IsOk());

  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry(kCollKey2, &entry2), IsOk());

  // Make sure that entry was actually created and we didn't just succeed
  // optimistically. (Oddly I can't seem to hit the sequence of events required
  // for the bug that used to be here if I just set this to APP_CACHE).
  EXPECT_EQ(kSize, WriteData(entry2, 0, 0, buffer2.get(), kSize, false));

  // entry1 is still usable, though, and distinct (we just won't be able to
  // re-open it).
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, read_buffer.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), read_buffer->data(), kSize));

  // The two entries must not share storage despite the hash collision.
  EXPECT_EQ(kSize, ReadData(entry2, 0, 0, read_buffer.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer2->data(), read_buffer->data(), kSize));

  entry1->Close();
  entry2->Close();
}
5062 
TEST_F(DiskCacheEntryTest, SimpleCacheConvertToSparseStream2LeftOver) {
  // Testcase for what happens when we have a sparse stream and a left over
  // empty stream 2 file.
  const int kSize = 10;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  // Create an empty stream 2. To do that, we first make a non-empty one, then
  // truncate it (since otherwise the write would just get ignored).
  EXPECT_EQ(kSize, WriteData(entry, /* stream = */ 2, /* offset = */ 0,
                             buffer.get(), kSize, false));
  EXPECT_EQ(0, WriteData(entry, /* stream = */ 2, /* offset = */ 0,
                         buffer.get(), 0, true));

  // Converting to a sparse entry while the empty _1 file lingers.
  EXPECT_EQ(kSize, WriteSparseData(entry, 5, buffer.get(), kSize));
  entry->Close();

  // Reopen, and try to get the sparse data back.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  EXPECT_EQ(kSize, ReadSparseData(entry, 5, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer2->data(), kSize));
  entry->Close();
}
5092 
TEST_F(DiskCacheEntryTest, SimpleCacheLazyStream2CreateFailure) {
  // Testcase for what happens when lazy-creation of stream 2 fails.
  const int kSize = 10;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  // Synchronous ops, for ease of disk state;
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();

  const char kKey[] = "a key";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  // Create _1 file for stream 2; this should inject a failure when the cache
  // tries to create it itself.
  // (FLAG_CREATE fails if the file already exists, which is the injection
  // mechanism here.)
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(kKey, 1));
  base::File entry_file1(entry_file1_path,
                         base::File::FLAG_WRITE | base::File::FLAG_CREATE);
  ASSERT_TRUE(entry_file1.IsValid());
  entry_file1.Close();

  // The stream 2 write must surface the create failure, not crash.
  EXPECT_EQ(net::ERR_CACHE_WRITE_FAILURE,
            WriteData(entry, /* index = */ 2, /* offset = */ 0, buffer.get(),
                      kSize, /* truncate = */ false));
  entry->Close();
}
5122 
TEST_F(DiskCacheEntryTest, SimpleCacheChecksumpScrewUp) {
  // Test for a bug that occurred during development of movement of CRC
  // computation off I/O thread.
  const int kSize = 10;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  const int kDoubleSize = kSize * 2;
  auto big_buffer = base::MakeRefCounted<net::IOBufferWithSize>(kDoubleSize);
  CacheTestFillBuffer(big_buffer->data(), kDoubleSize, false);

  SetSimpleCacheMode();
  InitCache();

  const char kKey[] = "a key";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  // Write out big_buffer for the double range. Checksum will be set to this.
  ASSERT_EQ(kDoubleSize,
            WriteData(entry, 1, 0, big_buffer.get(), kDoubleSize, false));

  // Reset remembered position to 0 by writing at an earlier non-zero offset.
  ASSERT_EQ(1, WriteData(entry, /* stream = */ 1, /* offset = */ 1,
                         big_buffer.get(), /* len = */ 1, false));

  // Now write out the half-range twice. An intermediate revision would
  // incorrectly compute checksum as if payload was buffer followed by buffer
  // rather than buffer followed by end of big_buffer.
  ASSERT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  ASSERT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  entry->Close();

  // Reopen and verify: the reads below fail if the stored CRC was computed
  // over the wrong byte sequence.
  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer2->data(), kSize));
  EXPECT_EQ(kSize, ReadData(entry, 1, kSize, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(big_buffer->data() + kSize, buffer2->data(), kSize));
  entry->Close();
}
5164 
// Runs the shared use-after-backend-destruction body on the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleUseAfterBackendDestruction) {
  SetSimpleCacheMode();
  InitCache();
  UseAfterBackendDestruction();
}
5170 
// Runs the shared use-after-backend-destruction body on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyUseAfterBackendDestruction) {
  // https://crbug.com/741620
  SetMemoryOnlyMode();
  InitCache();
  UseAfterBackendDestruction();
}
5177 
// Runs the shared sparse close-after-destruction body on the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleCloseSparseAfterBackendDestruction) {
  SetSimpleCacheMode();
  InitCache();
  CloseSparseAfterBackendDestruction();
}
5183 
// Runs the shared sparse close-after-destruction body on the in-memory
// backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyCloseSparseAfterBackendDestruction) {
  // https://crbug.com/946434
  SetMemoryOnlyMode();
  InitCache();
  CloseSparseAfterBackendDestruction();
}
5190 
LastUsedTimePersists()5191 void DiskCacheEntryTest::LastUsedTimePersists() {
5192   // Make sure that SetLastUsedTimeForTest persists. When used with SimpleCache,
5193   // this also checks that Entry::GetLastUsed is based on information in index,
5194   // when available, not atime on disk, which can be inaccurate.
5195   const char kKey[] = "a key";
5196   InitCache();
5197 
5198   disk_cache::Entry* entry1 = nullptr;
5199   ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
5200   ASSERT_TRUE(nullptr != entry1);
5201   base::Time modified_last_used = entry1->GetLastUsed() - base::Minutes(5);
5202   entry1->SetLastUsedTimeForTest(modified_last_used);
5203   entry1->Close();
5204 
5205   disk_cache::Entry* entry2 = nullptr;
5206   ASSERT_THAT(OpenEntry(kKey, &entry2), IsOk());
5207   ASSERT_TRUE(nullptr != entry2);
5208 
5209   base::TimeDelta diff = modified_last_used - entry2->GetLastUsed();
5210   EXPECT_LT(diff, base::Seconds(2));
5211   EXPECT_GT(diff, -base::Seconds(2));
5212   entry2->Close();
5213 }
5214 
// Last-used persistence on the default (blockfile) backend.
TEST_F(DiskCacheEntryTest, LastUsedTimePersists) {
  LastUsedTimePersists();
}
5218 
// Last-used persistence on the Simple backend (InitCache runs in the helper).
TEST_F(DiskCacheEntryTest, SimpleLastUsedTimePersists) {
  SetSimpleCacheMode();
  LastUsedTimePersists();
}
5223 
// Last-used persistence on the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyLastUsedTimePersists) {
  SetMemoryOnlyMode();
  LastUsedTimePersists();
}
5228 
TruncateBackwards()5229 void DiskCacheEntryTest::TruncateBackwards() {
5230   const char kKey[] = "a key";
5231 
5232   disk_cache::Entry* entry = nullptr;
5233   ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
5234   ASSERT_TRUE(entry != nullptr);
5235 
5236   const int kBigSize = 40 * 1024;
5237   const int kSmallSize = 9727;
5238 
5239   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBigSize);
5240   CacheTestFillBuffer(buffer->data(), kBigSize, false);
5241   auto read_buf = base::MakeRefCounted<net::IOBufferWithSize>(kBigSize);
5242 
5243   ASSERT_EQ(kSmallSize, WriteData(entry, /* index = */ 0,
5244                                   /* offset = */ kBigSize, buffer.get(),
5245                                   /* size = */ kSmallSize,
5246                                   /* truncate = */ false));
5247   memset(read_buf->data(), 0, kBigSize);
5248   ASSERT_EQ(kSmallSize, ReadData(entry, /* index = */ 0,
5249                                  /* offset = */ kBigSize, read_buf.get(),
5250                                  /* size = */ kSmallSize));
5251   EXPECT_EQ(0, memcmp(read_buf->data(), buffer->data(), kSmallSize));
5252 
5253   // A partly overlapping truncate before the previous write.
5254   ASSERT_EQ(kBigSize,
5255             WriteData(entry, /* index = */ 0,
5256                       /* offset = */ 3, buffer.get(), /* size = */ kBigSize,
5257                       /* truncate = */ true));
5258   memset(read_buf->data(), 0, kBigSize);
5259   ASSERT_EQ(kBigSize,
5260             ReadData(entry, /* index = */ 0,
5261                      /* offset = */ 3, read_buf.get(), /* size = */ kBigSize));
5262   EXPECT_EQ(0, memcmp(read_buf->data(), buffer->data(), kBigSize));
5263   EXPECT_EQ(kBigSize + 3, entry->GetDataSize(0));
5264   entry->Close();
5265 }
5266 
// Backwards truncation on the default (blockfile) backend.
TEST_F(DiskCacheEntryTest, TruncateBackwards) {
  // https://crbug.com/946539/
  InitCache();
  TruncateBackwards();
}
5272 
// Backwards truncation on the Simple backend.
TEST_F(DiskCacheEntryTest, SimpleTruncateBackwards) {
  SetSimpleCacheMode();
  InitCache();
  TruncateBackwards();
}
5278 
// In-memory-cache variant of TruncateBackwards.
TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateBackwards) {
  SetMemoryOnlyMode();
  InitCache();
  TruncateBackwards();
}
5284 
ZeroWriteBackwards()5285 void DiskCacheEntryTest::ZeroWriteBackwards() {
5286   const char kKey[] = "a key";
5287 
5288   disk_cache::Entry* entry = nullptr;
5289   ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
5290   ASSERT_TRUE(entry != nullptr);
5291 
5292   const int kSize = 1024;
5293   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
5294   CacheTestFillBuffer(buffer->data(), kSize, false);
5295 
5296   // Offset here needs to be > blockfile's kMaxBlockSize to hit
5297   // https://crbug.com/946538, as writes close to beginning are handled
5298   // specially.
5299   EXPECT_EQ(0, WriteData(entry, /* index = */ 0,
5300                          /* offset = */ 17000, buffer.get(),
5301                          /* size = */ 0, /* truncate = */ true));
5302 
5303   EXPECT_EQ(0, WriteData(entry, /* index = */ 0,
5304                          /* offset = */ 0, buffer.get(),
5305                          /* size = */ 0, /* truncate = */ false));
5306 
5307   EXPECT_EQ(kSize, ReadData(entry, /* index = */ 0,
5308                             /* offset = */ 0, buffer.get(),
5309                             /* size = */ kSize));
5310   for (int i = 0; i < kSize; ++i) {
5311     EXPECT_EQ(0, buffer->data()[i]) << i;
5312   }
5313   entry->Close();
5314 }
5315 
// Blockfile-backend variant of ZeroWriteBackwards.
TEST_F(DiskCacheEntryTest, ZeroWriteBackwards) {
  // https://crbug.com/946538/
  InitCache();
  ZeroWriteBackwards();
}
5321 
// Simple-cache variant of ZeroWriteBackwards.
TEST_F(DiskCacheEntryTest, SimpleZeroWriteBackwards) {
  SetSimpleCacheMode();
  InitCache();
  ZeroWriteBackwards();
}
5327 
// In-memory-cache variant of ZeroWriteBackwards.
TEST_F(DiskCacheEntryTest, MemoryOnlyZeroWriteBackwards) {
  SetMemoryOnlyMode();
  InitCache();
  ZeroWriteBackwards();
}
5333 
// Shared body for the SparseOffset64Bit tests. Note that this helper calls
// InitCache() itself; callers only need to set the backend-mode flags first.
void DiskCacheEntryTest::SparseOffset64Bit() {
  // Offsets to sparse ops are 64-bit, make sure we keep track of all of them.
  // (Or, as at least in case of blockfile, fail things cleanly, as it has a
  //  cap on max offset that's much lower).
  bool blockfile = !memory_only_ && !simple_cache_mode_;
  InitCache();

  const char kKey[] = "a key";

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);

  const int kSize = 1024;
  // One bit set very high, so intermediate truncations to 32-bit would drop it
  // even if they happen after a bunch of shifting right.
  const int64_t kOffset = (1ll << 61);

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  // Blockfile caps the max sparse offset and must reject this cleanly; the
  // other backends should accept the full 64-bit offset.
  EXPECT_EQ(blockfile ? net::ERR_CACHE_OPERATION_NOT_SUPPORTED : kSize,
            WriteSparseData(entry, kOffset, buffer.get(), kSize));

  // Nothing was written near offset 0, so no range is available there.
  int64_t start_out = -1;
  EXPECT_EQ(0, GetAvailableRange(entry, /* offset = */ 0, kSize, &start_out));

  // The data must be found at exactly kOffset (no 32-bit truncation).
  start_out = -1;
  EXPECT_EQ(blockfile ? 0 : kSize,
            GetAvailableRange(entry, kOffset, kSize, &start_out));
  EXPECT_EQ(kOffset, start_out);

  entry->Close();
}
5368 
// Blockfile-backend variant of SparseOffset64Bit.
//
// Note: SparseOffset64Bit() calls InitCache() itself, so no InitCache() call
// is needed here; the previous extra call just built a backend that was
// immediately discarded and rebuilt by the helper.
TEST_F(DiskCacheEntryTest, SparseOffset64Bit) {
  SparseOffset64Bit();
}
5373 
// Simple-cache variant of SparseOffset64Bit.
//
// Note: SparseOffset64Bit() calls InitCache() itself, so only the mode flag
// needs setting here; the previous extra InitCache() call built a backend
// that was immediately discarded and rebuilt by the helper.
TEST_F(DiskCacheEntryTest, SimpleSparseOffset64Bit) {
  SetSimpleCacheMode();
  SparseOffset64Bit();
}
5379 
// In-memory-cache variant of SparseOffset64Bit.
//
// Note: SparseOffset64Bit() calls InitCache() itself, so only the mode flag
// needs setting here; the previous extra InitCache() call built a backend
// that was immediately discarded and rebuilt by the helper.
TEST_F(DiskCacheEntryTest, MemoryOnlySparseOffset64Bit) {
  // https://crbug.com/946436
  SetMemoryOnlyMode();
  SparseOffset64Bit();
}
5386 
// Checks that an entry opened while a write on the same key is still pending
// stays usable even though the intervening Close() temporarily drops the
// external reference count to zero. The exact ordering of the operations
// below is what the test is about — do not reorder.
TEST_F(DiskCacheEntryTest, SimpleCacheCloseResurrection) {
  const int kSize = 10;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  const char kKey[] = "key";
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);

  // Let optimistic create finish.
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Null callback: the write is expected to complete optimistically.
  int rv = entry->WriteData(1, 0, buffer.get(), kSize,
                            net::CompletionOnceCallback(), false);

  // Write should be optimistic.
  ASSERT_EQ(kSize, rv);

  // Since the write is still pending, the open will get queued...
  TestEntryResultCompletionCallback cb_open;
  EntryResult result2 =
      cache_->OpenEntry(kKey, net::HIGHEST, cb_open.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, result2.net_error());

  // ... as the open is queued, this Close will temporarily reduce the number
  // of external references to 0.  This should not break things.
  entry->Close();

  // Wait till open finishes.
  result2 = cb_open.GetResult(std::move(result2));
  ASSERT_EQ(net::OK, result2.net_error());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  ASSERT_TRUE(entry2 != nullptr);

  // Give the first Close a chance to finish.
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Make sure |entry2| is still usable: the earlier write must read back.
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry2, 1, 0, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer2->data(), kSize));
  entry2->Close();
}
5439 
TEST_F(DiskCacheEntryTest, BlockFileSparsePendingAfterDtor) {
  // Test of behavior of ~EntryImpl for sparse entry that runs after backend
  // destruction.
  //
  // Hand-creating the backend for realistic shutdown behavior.
  CleanupCacheDir();
  CreateBackend(disk_cache::kNone);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);

  const int kSize = 61184;

  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // The write pattern here avoids the second write being handled by the
  // buffering layer, making SparseControl have to deal with its asynchrony.
  EXPECT_EQ(1, WriteSparseData(entry, 65535, buf.get(), 1));
  // Leave this second write pending when the backend goes away below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(2560, buf.get(), kSize, base::DoNothing()));
  entry->Close();
  ResetCaches();

  // Create a new instance as a way of flushing the thread.
  InitCache();
  FlushQueueForTest();
}
5469 
// Fixture for tests of the simple cache's open-time prefetch behavior
// (full-file prefetch vs. speculative trailer prefetch), configured via
// field-trial parameters of the prefetch experiment feature.
class DiskCacheSimplePrefetchTest : public DiskCacheEntryTest {
 public:
  DiskCacheSimplePrefetchTest() = default;

  // Size of the stream 1 payload written by the Init* helpers below.
  enum { kEntrySize = 1024 };

  void SetUp() override {
    payload_ = base::MakeRefCounted<net::IOBufferWithSize>(kEntrySize);
    CacheTestFillBuffer(payload_->data(), kEntrySize, false);
    DiskCacheEntryTest::SetUp();
  }

  // Enables the prefetch experiment with the given full-prefetch byte limit
  // and speculative trailer-prefetch byte count.
  void SetupFullAndTrailerPrefetch(int full_size,
                                   int trailer_speculative_size) {
    std::map<std::string, std::string> params;
    params[disk_cache::kSimpleCacheFullPrefetchBytesParam] =
        base::NumberToString(full_size);
    params[disk_cache::kSimpleCacheTrailerPrefetchSpeculativeBytesParam] =
        base::NumberToString(trailer_speculative_size);
    scoped_feature_list_.InitAndEnableFeatureWithParameters(
        disk_cache::kSimpleCachePrefetchExperiment, params);
  }

  // Convenience wrapper: full prefetch only, no speculative trailer prefetch.
  void SetupFullPrefetch(int size) { SetupFullAndTrailerPrefetch(size, 0); }

  // Creates the cache and writes a kEntrySize payload to stream 1 of |key|.
  void InitCacheAndCreateEntry(const std::string& key) {
    SetSimpleCacheMode();
    SetCacheType(SimpleCacheType());
    InitCache();

    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    // Use stream 1 since that's what new prefetch stuff is about.
    ASSERT_EQ(kEntrySize,
              WriteData(entry, 1, 0, payload_.get(), kEntrySize, false));
    entry->Close();
  }

  virtual net::CacheType SimpleCacheType() const { return net::DISK_CACHE; }

  // Like InitCacheAndCreateEntry(), but performs an overlapping second write
  // so the stored entry ends up without a usable stream 1 checksum.
  // NOTE(review): unlike InitCacheAndCreateEntry(), this does not call
  // SetCacheType(SimpleCacheType()) — presumably fine because the NoCrc
  // tests run in the default cache type; confirm if reusing elsewhere.
  void InitCacheAndCreateEntryWithNoCrc(const std::string& key) {
    const int kHalfSize = kEntrySize / 2;
    const int kRemSize = kEntrySize - kHalfSize;

    SetSimpleCacheMode();
    InitCache();

    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    // Use stream 1 since that's what new prefetch stuff is about.
    ASSERT_EQ(kEntrySize,
              WriteData(entry, 1, 0, payload_.get(), kEntrySize, false));

    // Overwrite later part of the buffer, since we can't keep track of
    // the checksum in that case.  Do it with identical contents, though,
    // so that the only difference between here and InitCacheAndCreateEntry()
    // would be whether the result has a checksum or not.
    auto second_half = base::MakeRefCounted<net::IOBufferWithSize>(kRemSize);
    memcpy(second_half->data(), payload_->data() + kHalfSize, kRemSize);
    ASSERT_EQ(kRemSize, WriteData(entry, 1, kHalfSize, second_half.get(),
                                  kRemSize, false));
    entry->Close();
  }

  // Opens |key| and reads stream 1, expecting a synchronous completion iff
  // |expect_preread_stream1|; also verifies the data matches |payload_|.
  void TryRead(const std::string& key, bool expect_preread_stream1) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    auto read_buf = base::MakeRefCounted<net::IOBufferWithSize>(kEntrySize);
    net::TestCompletionCallback cb;
    int rv = entry->ReadData(1, 0, read_buf.get(), kEntrySize, cb.callback());

    // If preload happened, a synchronous reply is expected.
    if (expect_preread_stream1)
      EXPECT_EQ(kEntrySize, rv);
    else
      EXPECT_EQ(net::ERR_IO_PENDING, rv);
    rv = cb.GetResult(rv);
    EXPECT_EQ(kEntrySize, rv);
    EXPECT_EQ(0, memcmp(read_buf->data(), payload_->data(), kEntrySize));
    entry->Close();
  }

 protected:
  scoped_refptr<net::IOBuffer> payload_;
  base::test::ScopedFeatureList scoped_feature_list_;
};
5556 
TEST_F(DiskCacheSimplePrefetchTest, NoPrefetch) {
  base::HistogramTester histograms;
  SetupFullPrefetch(0);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // With a zero full-prefetch budget, open should record no prefetching.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_NONE, 1);
}
5568 
TEST_F(DiskCacheSimplePrefetchTest, YesPrefetch) {
  base::HistogramTester histograms;
  SetupFullPrefetch(2 * kEntrySize);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ true);

  // A full-prefetch budget larger than the entry lets open prefetch it all,
  // so the subsequent read completes synchronously.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_FULL, 1);
}
5580 
TEST_F(DiskCacheSimplePrefetchTest, YesPrefetchNoRead) {
  base::HistogramTester histograms;
  SetupFullPrefetch(2 * kEntrySize);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);

  // Open and close without reading: the prefetch mode is still recorded at
  // open time.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(kEntryKey, &entry), IsOk());
  entry->Close();

  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_FULL, 1);
}
5595 
5596 // This makes sure we detect checksum error on entry that's small enough to be
5597 // prefetched. This is like DiskCacheEntryTest.BadChecksum, but we make sure
5598 // to configure prefetch explicitly.
TEST_F(DiskCacheSimplePrefetchTest,BadChecksumSmall)5599 TEST_F(DiskCacheSimplePrefetchTest, BadChecksumSmall) {
5600   SetupFullPrefetch(1024);  // bigger than stuff below.
5601   SetSimpleCacheMode();
5602   InitCache();
5603 
5604   const char key[] = "the first key";
5605   ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, 10));
5606 
5607   disk_cache::Entry* entry = nullptr;
5608 
5609   // Open the entry. Since we made a small entry, we will detect the CRC
5610   // problem at open.
5611   EXPECT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
5612 }
5613 
TEST_F(DiskCacheSimplePrefetchTest, ChecksumNoPrefetch) {
  base::HistogramTester histograms;

  SetupFullPrefetch(0);
  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // Both recorded EOF checks should verify successfully.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncCheckEOFResult",
                                disk_cache::CHECK_EOF_RESULT_SUCCESS, 2);
}
5625 
TEST_F(DiskCacheSimplePrefetchTest, NoChecksumNoPrefetch) {
  base::HistogramTester histograms;

  SetupFullPrefetch(0);
  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntryWithNoCrc(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // Even without a stored CRC, both EOF checks should verify successfully.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncCheckEOFResult",
                                disk_cache::CHECK_EOF_RESULT_SUCCESS, 2);
}
5637 
TEST_F(DiskCacheSimplePrefetchTest, ChecksumPrefetch) {
  base::HistogramTester histograms;

  SetupFullPrefetch(2 * kEntrySize);
  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ true);

  // Prefetching should not change EOF-check results: both succeed.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncCheckEOFResult",
                                disk_cache::CHECK_EOF_RESULT_SUCCESS, 2);
}
5649 
TEST_F(DiskCacheSimplePrefetchTest, NoChecksumPrefetch) {
  base::HistogramTester histograms;

  SetupFullPrefetch(2 * kEntrySize);
  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntryWithNoCrc(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ true);

  // The EOF check is recorded even when no CRC is present in the entry.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncCheckEOFResult",
                                disk_cache::CHECK_EOF_RESULT_SUCCESS, 2);
}
5662 
TEST_F(DiskCacheSimplePrefetchTest, PrefetchReadsSync) {
  // Make sure we can read things synchronously after prefetch.
  SetupFullPrefetch(32768);  // way bigger than kEntrySize
  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());
  auto read_buf = base::MakeRefCounted<net::IOBufferWithSize>(kEntrySize);

  // That this is entry->ReadData(...) rather than ReadData(entry, ...) is
  // meaningful here, as the latter is a helper in the test fixture that blocks
  // if needed.
  // A null callback is passed because the read must complete synchronously.
  EXPECT_EQ(kEntrySize, entry->ReadData(1, 0, read_buf.get(), kEntrySize,
                                        net::CompletionOnceCallback()));
  EXPECT_EQ(0, memcmp(read_buf->data(), payload_->data(), kEntrySize));
  entry->Close();
}
5681 
TEST_F(DiskCacheSimplePrefetchTest, NoFullNoSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(0, 0);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // Both prefetch knobs at zero: no prefetching of any kind.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_NONE, 1);
}
5693 
TEST_F(DiskCacheSimplePrefetchTest, NoFullSmallSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(0, kEntrySize / 2);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // A small speculative size yields a trailer-only prefetch, so stream 1
  // data is not preloaded.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_TRAILER, 1);
}
5705 
TEST_F(DiskCacheSimplePrefetchTest, NoFullLargeSpeculative) {
  base::HistogramTester histograms;
  // A speculative trailer prefetch bigger than the entry file effectively
  // becomes a full prefetch.
  SetupFullAndTrailerPrefetch(0, kEntrySize * 2);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ true);

  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_FULL, 1);
}
5719 
TEST_F(DiskCacheSimplePrefetchTest, SmallFullNoSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(kEntrySize / 2, 0);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // A full-prefetch budget smaller than the entry disables prefetch entirely.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_NONE, 1);
}
5731 
TEST_F(DiskCacheSimplePrefetchTest, LargeFullNoSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(kEntrySize * 2, 0);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ true);

  // A sufficiently large full-prefetch budget preloads the whole entry.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_FULL, 1);
}
5743 
TEST_F(DiskCacheSimplePrefetchTest, SmallFullSmallSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(kEntrySize / 2, kEntrySize / 2);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // Full prefetch is too small to apply, so the speculative trailer
  // prefetch is used instead.
  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_TRAILER, 1);
}
5755 
TEST_F(DiskCacheSimplePrefetchTest, LargeFullSmallSpeculative) {
  base::HistogramTester histograms;
  // Full prefetch takes precedence over a trailer speculative prefetch.
  SetupFullAndTrailerPrefetch(kEntrySize * 2, kEntrySize / 2);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ true);

  histograms.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_FULL, 1);
}
5768 
// Variant of DiskCacheSimplePrefetchTest that runs the cache as
// net::APP_CACHE instead of net::DISK_CACHE.
class DiskCacheSimpleAppCachePrefetchTest : public DiskCacheSimplePrefetchTest {
 public:
  // APP_CACHE mode will enable trailer prefetch hint support.
  net::CacheType SimpleCacheType() const override { return net::APP_CACHE; }
};
5774 
TEST_F(DiskCacheSimpleAppCachePrefetchTest, NoFullNoSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(0, 0);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // In APP_CACHE mode the trailer hint drives a trailer prefetch even with
  // both experiment knobs at zero.
  histograms.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_TRAILER, 1);
}
5786 
TEST_F(DiskCacheSimpleAppCachePrefetchTest, NoFullSmallSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(0, kEntrySize / 2);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  histograms.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_TRAILER, 1);
}
5798 
TEST_F(DiskCacheSimpleAppCachePrefetchTest, NoFullLargeSpeculative) {
  base::HistogramTester histograms;
  // Even though the speculative trailer prefetch size is larger than the
  // file size, the hint should take precedence and still perform a limited
  // trailer prefetch.
  SetupFullAndTrailerPrefetch(0, kEntrySize * 2);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  histograms.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_TRAILER, 1);
}
5813 
TEST_F(DiskCacheSimpleAppCachePrefetchTest, SmallFullNoSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(kEntrySize / 2, 0);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  // Full prefetch budget is too small; the trailer hint still applies.
  histograms.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_TRAILER, 1);
}
5825 
TEST_F(DiskCacheSimpleAppCachePrefetchTest, LargeFullNoSpeculative) {
  base::HistogramTester histograms;
  // Full prefetch takes precedence over a trailer hint prefetch.
  SetupFullAndTrailerPrefetch(kEntrySize * 2, 0);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ true);

  histograms.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_FULL, 1);
}
5838 
TEST_F(DiskCacheSimpleAppCachePrefetchTest, SmallFullSmallSpeculative) {
  base::HistogramTester histograms;
  SetupFullAndTrailerPrefetch(kEntrySize / 2, kEntrySize / 2);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ false);

  histograms.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_TRAILER, 1);
}
5850 
TEST_F(DiskCacheSimpleAppCachePrefetchTest, LargeFullSmallSpeculative) {
  base::HistogramTester histograms;
  // Full prefetch takes precedence over a trailer speculative prefetch.
  SetupFullAndTrailerPrefetch(kEntrySize * 2, kEntrySize / 2);

  const char kEntryKey[] = "a key";
  InitCacheAndCreateEntry(kEntryKey);
  TryRead(kEntryKey, /* expect_preread_stream1 */ true);

  histograms.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                disk_cache::OPEN_PREFETCH_FULL, 1);
}
5863