1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/basictypes.h"
6 #include "base/bind.h"
7 #include "base/bind_helpers.h"
8 #include "base/file_util.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/threading/platform_thread.h"
12 #include "base/timer/timer.h"
13 #include "net/base/completion_callback.h"
14 #include "net/base/io_buffer.h"
15 #include "net/base/net_errors.h"
16 #include "net/base/test_completion_callback.h"
17 #include "net/disk_cache/backend_impl.h"
18 #include "net/disk_cache/disk_cache_test_base.h"
19 #include "net/disk_cache/disk_cache_test_util.h"
20 #include "net/disk_cache/entry_impl.h"
21 #include "net/disk_cache/mem_entry_impl.h"
22 #include "net/disk_cache/simple/simple_entry_format.h"
23 #include "net/disk_cache/simple/simple_entry_impl.h"
24 #include "net/disk_cache/simple/simple_synchronous_entry.h"
25 #include "net/disk_cache/simple/simple_test_util.h"
26 #include "net/disk_cache/simple/simple_util.h"
27 #include "testing/gtest/include/gtest/gtest.h"
28 
29 using base::Time;
30 using disk_cache::ScopedEntryPtr;
31 
32 // Tests that can run with different types of caches.
33 class DiskCacheEntryTest : public DiskCacheTestWithCache {
34  public:
35   void InternalSyncIOBackground(disk_cache::Entry* entry);
36   void ExternalSyncIOBackground(disk_cache::Entry* entry);
37 
38  protected:
39   void InternalSyncIO();
40   void InternalAsyncIO();
41   void ExternalSyncIO();
42   void ExternalAsyncIO();
43   void ReleaseBuffer();
44   void StreamAccess();
45   void GetKey();
46   void GetTimes();
47   void GrowData();
48   void TruncateData();
49   void ZeroLengthIO();
50   void Buffering();
51   void SizeAtCreate();
52   void SizeChanges();
53   void ReuseEntry(int size);
54   void InvalidData();
55   void ReadWriteDestroyBuffer();
56   void DoomNormalEntry();
57   void DoomEntryNextToOpenEntry();
58   void DoomedEntry();
59   void BasicSparseIO();
60   void HugeSparseIO();
61   void GetAvailableRange();
62   void CouldBeSparse();
63   void UpdateSparseEntry();
64   void DoomSparseEntry();
65   void PartialSparseEntry();
66   bool SimpleCacheMakeBadChecksumEntry(const std::string& key, int* data_size);
67   bool SimpleCacheThirdStreamFileExists(const char* key);
68   void SyncDoomEntry(const char* key);
69 };
70 
71 // This part of the test runs on the background thread.
72 void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
73   const int kSize1 = 10;
74   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
75   CacheTestFillBuffer(buffer1->data(), kSize1, false);
76   EXPECT_EQ(
77       0,
78       entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
79   base::strlcpy(buffer1->data(), "the data", kSize1);
80   EXPECT_EQ(10,
81             entry->WriteData(
82                 0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
83   memset(buffer1->data(), 0, kSize1);
84   EXPECT_EQ(
85       10,
86       entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
87   EXPECT_STREQ("the data", buffer1->data());
88 
89   const int kSize2 = 5000;
90   const int kSize3 = 10000;
91   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
92   scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
93   memset(buffer3->data(), 0, kSize3);
94   CacheTestFillBuffer(buffer2->data(), kSize2, false);
95   base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
96   EXPECT_EQ(
97       5000,
98       entry->WriteData(
99           1, 1500, buffer2.get(), kSize2, net::CompletionCallback(), false));
100   memset(buffer2->data(), 0, kSize2);
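  // The 5000-byte write at offset 1500 grew stream 1 to 6500 bytes, so a read
  // starting at offset 1511 returns the remaining 6500 - 1511 = 4989 bytes.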
101   EXPECT_EQ(4989,
102             entry->ReadData(
103                 1, 1511, buffer2.get(), kSize2, net::CompletionCallback()));
104   EXPECT_STREQ("big data goes here", buffer2->data());
105   EXPECT_EQ(
106       5000,
107       entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
108   EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
109   EXPECT_EQ(1500,
110             entry->ReadData(
111                 1, 5000, buffer2.get(), kSize2, net::CompletionCallback()));
112 
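  // Stream 1 currently holds 6500 bytes, so a read at or past that offset
  // returns 0 bytes, while a large read from offset 0 returns all 6500.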
113   EXPECT_EQ(0,
114             entry->ReadData(
115                 1, 6500, buffer2.get(), kSize2, net::CompletionCallback()));
116   EXPECT_EQ(
117       6500,
118       entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
119   EXPECT_EQ(8192,
120             entry->WriteData(
121                 1, 0, buffer3.get(), 8192, net::CompletionCallback(), false));
122   EXPECT_EQ(
123       8192,
124       entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
125   EXPECT_EQ(8192, entry->GetDataSize(1));
126 
127   // We need to delete the memory buffer on this thread.
128   EXPECT_EQ(0, entry->WriteData(
129       0, 0, NULL, 0, net::CompletionCallback(), true));
130   EXPECT_EQ(0, entry->WriteData(
131       1, 0, NULL, 0, net::CompletionCallback(), true));
132 }
133 
134 // We need to support synchronous IO even though it is not a supported operation
135 // from the point of view of the disk cache's public interface, because we use
136 // it internally, not just in a few tests, but as part of the implementation
137 // (see sparse_control.cc, for example).
138 void DiskCacheEntryTest::InternalSyncIO() {
139   disk_cache::Entry* entry = NULL;
140   ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
141   ASSERT_TRUE(NULL != entry);
142 
143   // The bulk of the test runs from within the callback, on the cache thread.
144   RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground,
145                             base::Unretained(this),
146                             entry));
147 
148 
149   entry->Doom();
150   entry->Close();
151   FlushQueueForTest();
152   EXPECT_EQ(0, cache_->GetEntryCount());
153 }
154 
155 TEST_F(DiskCacheEntryTest, InternalSyncIO) {
156   InitCache();
157   InternalSyncIO();
158 }
159 
160 TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
161   SetMemoryOnlyMode();
162   InitCache();
163   InternalSyncIO();
164 }
165 
166 void DiskCacheEntryTest::InternalAsyncIO() {
167   disk_cache::Entry* entry = NULL;
168   ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
169   ASSERT_TRUE(NULL != entry);
170 
171   // Avoid using internal buffers for the test. We have to write something to
172   // the entry and close it so that we flush the internal buffer to disk. After
173   // that, IO operations will be really hitting the disk. We don't care about
174   // the content, so just extending the entry is enough (all extensions zero-
175   // fill any holes).
176   EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false));
177   EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false));
178   entry->Close();
179   ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
180 
181   MessageLoopHelper helper;
182   // Let's verify that each IO goes to the right callback object.
183   CallbackTest callback1(&helper, false);
184   CallbackTest callback2(&helper, false);
185   CallbackTest callback3(&helper, false);
186   CallbackTest callback4(&helper, false);
187   CallbackTest callback5(&helper, false);
188   CallbackTest callback6(&helper, false);
189   CallbackTest callback7(&helper, false);
190   CallbackTest callback8(&helper, false);
191   CallbackTest callback9(&helper, false);
192   CallbackTest callback10(&helper, false);
193   CallbackTest callback11(&helper, false);
194   CallbackTest callback12(&helper, false);
195   CallbackTest callback13(&helper, false);
196 
197   const int kSize1 = 10;
198   const int kSize2 = 5000;
199   const int kSize3 = 10000;
200   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
201   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
202   scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
203   CacheTestFillBuffer(buffer1->data(), kSize1, false);
204   CacheTestFillBuffer(buffer2->data(), kSize2, false);
205   CacheTestFillBuffer(buffer3->data(), kSize3, false);
206 
207   EXPECT_EQ(0,
208             entry->ReadData(
209                 0,
210                 15 * 1024,
211                 buffer1.get(),
212                 kSize1,
213                 base::Bind(&CallbackTest::Run, base::Unretained(&callback1))));
214   base::strlcpy(buffer1->data(), "the data", kSize1);
215   int expected = 0;
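  // Each operation may complete synchronously (returning the byte count) or
  // return net::ERR_IO_PENDING; |expected| counts the pending operations whose
  // callbacks the MessageLoopHelper has to wait for.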
216   int ret = entry->WriteData(
217       0,
218       0,
219       buffer1.get(),
220       kSize1,
221       base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
222       false);
223   EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
224   if (net::ERR_IO_PENDING == ret)
225     expected++;
226 
227   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
228   memset(buffer2->data(), 0, kSize2);
229   ret = entry->ReadData(
230       0,
231       0,
232       buffer2.get(),
233       kSize1,
234       base::Bind(&CallbackTest::Run, base::Unretained(&callback3)));
235   EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
236   if (net::ERR_IO_PENDING == ret)
237     expected++;
238 
239   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
240   EXPECT_STREQ("the data", buffer2->data());
241 
242   base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
243   ret = entry->WriteData(
244       1,
245       1500,
246       buffer2.get(),
247       kSize2,
248       base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
249       true);
250   EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
251   if (net::ERR_IO_PENDING == ret)
252     expected++;
253 
254   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
255   memset(buffer3->data(), 0, kSize3);
256   ret = entry->ReadData(
257       1,
258       1511,
259       buffer3.get(),
260       kSize2,
261       base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
262   EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
263   if (net::ERR_IO_PENDING == ret)
264     expected++;
265 
266   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
267   EXPECT_STREQ("big data goes here", buffer3->data());
268   ret = entry->ReadData(
269       1,
270       0,
271       buffer2.get(),
272       kSize2,
273       base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
274   EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
275   if (net::ERR_IO_PENDING == ret)
276     expected++;
277 
278   memset(buffer3->data(), 0, kSize3);
279 
280   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
281   EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
282   ret = entry->ReadData(
283       1,
284       5000,
285       buffer2.get(),
286       kSize2,
287       base::Bind(&CallbackTest::Run, base::Unretained(&callback7)));
288   EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
289   if (net::ERR_IO_PENDING == ret)
290     expected++;
291 
292   ret = entry->ReadData(
293       1,
294       0,
295       buffer3.get(),
296       kSize3,
297       base::Bind(&CallbackTest::Run, base::Unretained(&callback9)));
298   EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
299   if (net::ERR_IO_PENDING == ret)
300     expected++;
301 
302   ret = entry->WriteData(
303       1,
304       0,
305       buffer3.get(),
306       8192,
307       base::Bind(&CallbackTest::Run, base::Unretained(&callback10)),
308       true);
309   EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
310   if (net::ERR_IO_PENDING == ret)
311     expected++;
312 
313   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
314   ret = entry->ReadData(
315       1,
316       0,
317       buffer3.get(),
318       kSize3,
319       base::Bind(&CallbackTest::Run, base::Unretained(&callback11)));
320   EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
321   if (net::ERR_IO_PENDING == ret)
322     expected++;
323 
324   EXPECT_EQ(8192, entry->GetDataSize(1));
325 
326   ret = entry->ReadData(
327       0,
328       0,
329       buffer1.get(),
330       kSize1,
331       base::Bind(&CallbackTest::Run, base::Unretained(&callback12)));
332   EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
333   if (net::ERR_IO_PENDING == ret)
334     expected++;
335 
336   ret = entry->ReadData(
337       1,
338       0,
339       buffer2.get(),
340       kSize2,
341       base::Bind(&CallbackTest::Run, base::Unretained(&callback13)));
342   EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
343   if (net::ERR_IO_PENDING == ret)
344     expected++;
345 
346   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
347 
348   EXPECT_FALSE(helper.callback_reused_error());
349 
350   entry->Doom();
351   entry->Close();
352   FlushQueueForTest();
353   EXPECT_EQ(0, cache_->GetEntryCount());
354 }
355 
356 TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
357   InitCache();
358   InternalAsyncIO();
359 }
360 
361 TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
362   SetMemoryOnlyMode();
363   InitCache();
364   InternalAsyncIO();
365 }
366 
367 // This part of the test runs on the background thread.
368 void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
369   const int kSize1 = 17000;
370   const int kSize2 = 25000;
371   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
372   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
373   CacheTestFillBuffer(buffer1->data(), kSize1, false);
374   CacheTestFillBuffer(buffer2->data(), kSize2, false);
375   base::strlcpy(buffer1->data(), "the data", kSize1);
376   EXPECT_EQ(17000,
377             entry->WriteData(
378                 0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
379   memset(buffer1->data(), 0, kSize1);
380   EXPECT_EQ(
381       17000,
382       entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
383   EXPECT_STREQ("the data", buffer1->data());
384 
385   base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
386   EXPECT_EQ(
387       25000,
388       entry->WriteData(
389           1, 10000, buffer2.get(), kSize2, net::CompletionCallback(), false));
390   memset(buffer2->data(), 0, kSize2);
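  // The 25000-byte write at offset 10000 grew stream 1 to 35000 bytes, so the
  // read at offset 10011 returns 35000 - 10011 = 24989 bytes.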
391   EXPECT_EQ(24989,
392             entry->ReadData(
393                 1, 10011, buffer2.get(), kSize2, net::CompletionCallback()));
394   EXPECT_STREQ("big data goes here", buffer2->data());
395   EXPECT_EQ(
396       25000,
397       entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
398   EXPECT_EQ(5000,
399             entry->ReadData(
400                 1, 30000, buffer2.get(), kSize2, net::CompletionCallback()));
401 
402   EXPECT_EQ(0,
403             entry->ReadData(
404                 1, 35000, buffer2.get(), kSize2, net::CompletionCallback()));
405   EXPECT_EQ(
406       17000,
407       entry->ReadData(1, 0, buffer1.get(), kSize1, net::CompletionCallback()));
408   EXPECT_EQ(
409       17000,
410       entry->WriteData(
411           1, 20000, buffer1.get(), kSize1, net::CompletionCallback(), false));
412   EXPECT_EQ(37000, entry->GetDataSize(1));
413 
414   // We need to delete the memory buffer on this thread.
415   EXPECT_EQ(0, entry->WriteData(
416       0, 0, NULL, 0, net::CompletionCallback(), true));
417   EXPECT_EQ(0, entry->WriteData(
418       1, 0, NULL, 0, net::CompletionCallback(), true));
419 }
420 
421 void DiskCacheEntryTest::ExternalSyncIO() {
422   disk_cache::Entry* entry;
423   ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
424 
425   // The bulk of the test runs from within the callback, on the cache thread.
426   RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground,
427                             base::Unretained(this),
428                             entry));
429 
430   entry->Doom();
431   entry->Close();
432   FlushQueueForTest();
433   EXPECT_EQ(0, cache_->GetEntryCount());
434 }
435 
436 TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
437   InitCache();
438   ExternalSyncIO();
439 }
440 
441 TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
442   InitCache();
443   cache_impl_->SetFlags(disk_cache::kNoBuffering);
444   ExternalSyncIO();
445 }
446 
447 TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
448   SetMemoryOnlyMode();
449   InitCache();
450   ExternalSyncIO();
451 }
452 
453 void DiskCacheEntryTest::ExternalAsyncIO() {
454   disk_cache::Entry* entry;
455   ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
456 
457   int expected = 0;
458 
459   MessageLoopHelper helper;
460   // Let's verify that each IO goes to the right callback object.
461   CallbackTest callback1(&helper, false);
462   CallbackTest callback2(&helper, false);
463   CallbackTest callback3(&helper, false);
464   CallbackTest callback4(&helper, false);
465   CallbackTest callback5(&helper, false);
466   CallbackTest callback6(&helper, false);
467   CallbackTest callback7(&helper, false);
468   CallbackTest callback8(&helper, false);
469   CallbackTest callback9(&helper, false);
470 
471   const int kSize1 = 17000;
472   const int kSize2 = 25000;
473   const int kSize3 = 25000;
474   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
475   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
476   scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
477   CacheTestFillBuffer(buffer1->data(), kSize1, false);
478   CacheTestFillBuffer(buffer2->data(), kSize2, false);
479   CacheTestFillBuffer(buffer3->data(), kSize3, false);
480   base::strlcpy(buffer1->data(), "the data", kSize1);
481   int ret = entry->WriteData(
482       0,
483       0,
484       buffer1.get(),
485       kSize1,
486       base::Bind(&CallbackTest::Run, base::Unretained(&callback1)),
487       false);
488   EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
489   if (net::ERR_IO_PENDING == ret)
490     expected++;
491 
492   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
493 
494   memset(buffer2->data(), 0, kSize1);
495   ret = entry->ReadData(
496       0,
497       0,
498       buffer2.get(),
499       kSize1,
500       base::Bind(&CallbackTest::Run, base::Unretained(&callback2)));
501   EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
502   if (net::ERR_IO_PENDING == ret)
503     expected++;
504 
505   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
506   EXPECT_STREQ("the data", buffer2->data());
507 
508   base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
509   ret = entry->WriteData(
510       1,
511       10000,
512       buffer2.get(),
513       kSize2,
514       base::Bind(&CallbackTest::Run, base::Unretained(&callback3)),
515       false);
516   EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
517   if (net::ERR_IO_PENDING == ret)
518     expected++;
519 
520   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
521 
522   memset(buffer3->data(), 0, kSize3);
523   ret = entry->ReadData(
524       1,
525       10011,
526       buffer3.get(),
527       kSize3,
528       base::Bind(&CallbackTest::Run, base::Unretained(&callback4)));
529   EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
530   if (net::ERR_IO_PENDING == ret)
531     expected++;
532 
533   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
534   EXPECT_STREQ("big data goes here", buffer3->data());
535   ret = entry->ReadData(
536       1,
537       0,
538       buffer2.get(),
539       kSize2,
540       base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
541   EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
542   if (net::ERR_IO_PENDING == ret)
543     expected++;
544 
545   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
546   memset(buffer3->data(), 0, kSize3);
547   EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
548   ret = entry->ReadData(
549       1,
550       30000,
551       buffer2.get(),
552       kSize2,
553       base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
554   EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
555   if (net::ERR_IO_PENDING == ret)
556     expected++;
557 
558   EXPECT_EQ(0,
559             entry->ReadData(
560                 1,
561                 35000,
562                 buffer2.get(),
563                 kSize2,
564                 base::Bind(&CallbackTest::Run, base::Unretained(&callback7))));
565   ret = entry->ReadData(
566       1,
567       0,
568       buffer1.get(),
569       kSize1,
570       base::Bind(&CallbackTest::Run, base::Unretained(&callback8)));
571   EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
572   if (net::ERR_IO_PENDING == ret)
573     expected++;
574   ret = entry->WriteData(
575       1,
576       20000,
577       buffer3.get(),
578       kSize1,
579       base::Bind(&CallbackTest::Run, base::Unretained(&callback9)),
580       false);
581   EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
582   if (net::ERR_IO_PENDING == ret)
583     expected++;
584 
585   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
586   EXPECT_EQ(37000, entry->GetDataSize(1));
587 
588   EXPECT_FALSE(helper.callback_reused_error());
589 
590   entry->Doom();
591   entry->Close();
592   FlushQueueForTest();
593   EXPECT_EQ(0, cache_->GetEntryCount());
594 }
595 
596 TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
597   InitCache();
598   ExternalAsyncIO();
599 }
600 
601 TEST_F(DiskCacheEntryTest, ExternalAsyncIONoBuffer) {
602   InitCache();
603   cache_impl_->SetFlags(disk_cache::kNoBuffering);
604   ExternalAsyncIO();
605 }
606 
607 TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
608   SetMemoryOnlyMode();
609   InitCache();
610   ExternalAsyncIO();
611 }
612 
613 // Tests that IOBuffers are not referenced after IO completes.
614 void DiskCacheEntryTest::ReleaseBuffer() {
615   disk_cache::Entry* entry = NULL;
616   ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
617   ASSERT_TRUE(NULL != entry);
618 
619   const int kBufferSize = 1024;
620   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
621   CacheTestFillBuffer(buffer->data(), kBufferSize, false);
622 
623   net::ReleaseBufferCompletionCallback cb(buffer.get());
624   int rv =
625       entry->WriteData(0, 0, buffer.get(), kBufferSize, cb.callback(), false);
626   EXPECT_EQ(kBufferSize, cb.GetResult(rv));
627   entry->Close();
628 }
629 
630 TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
631   InitCache();
632   cache_impl_->SetFlags(disk_cache::kNoBuffering);
633   ReleaseBuffer();
634 }
635 
636 TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
637   SetMemoryOnlyMode();
638   InitCache();
639   ReleaseBuffer();
640 }
641 
642 void DiskCacheEntryTest::StreamAccess() {
643   disk_cache::Entry* entry = NULL;
644   ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
645   ASSERT_TRUE(NULL != entry);
646 
647   const int kBufferSize = 1024;
648   const int kNumStreams = 3;
649   scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
650   for (int i = 0; i < kNumStreams; i++) {
651     reference_buffers[i] = new net::IOBuffer(kBufferSize);
652     CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false);
653   }
654   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize));
655   for (int i = 0; i < kNumStreams; i++) {
656     EXPECT_EQ(
657         kBufferSize,
658         WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
659     memset(buffer1->data(), 0, kBufferSize);
660     EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
661     EXPECT_EQ(
662         0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
663   }
664   EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
665             ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
666   entry->Close();
667 
668   // Open the entry and read it in chunks, including a read past the end.
669   ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
670   ASSERT_TRUE(NULL != entry);
671   const int kReadBufferSize = 600;
672   const int kFinalReadSize = kBufferSize - kReadBufferSize;
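  // kFinalReadSize is 1024 - 600 = 424 bytes, covered by the second, shorter read.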
673   COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads);
674   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize));
675   for (int i = 0; i < kNumStreams; i++) {
676     memset(buffer2->data(), 0, kReadBufferSize);
677     EXPECT_EQ(kReadBufferSize,
678               ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
679     EXPECT_EQ(
680         0,
681         memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));
682 
683     memset(buffer2->data(), 0, kReadBufferSize);
684     EXPECT_EQ(
685         kFinalReadSize,
686         ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
687     EXPECT_EQ(0,
688               memcmp(reference_buffers[i]->data() + kReadBufferSize,
689                      buffer2->data(),
690                      kFinalReadSize));
691   }
692 
693   entry->Close();
694 }
695 
696 TEST_F(DiskCacheEntryTest, StreamAccess) {
697   InitCache();
698   StreamAccess();
699 }
700 
701 TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
702   SetMemoryOnlyMode();
703   InitCache();
704   StreamAccess();
705 }
706 
707 void DiskCacheEntryTest::GetKey() {
708   std::string key("the first key");
709   disk_cache::Entry* entry;
710   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
711   EXPECT_EQ(key, entry->GetKey()) << "short key";
712   entry->Close();
713 
714   int seed = static_cast<int>(Time::Now().ToInternalValue());
715   srand(seed);
716   char key_buffer[20000];
717 
718   CacheTestFillBuffer(key_buffer, 3000, true);
719   key_buffer[1000] = '\0';
720 
721   key = key_buffer;
722   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
723   EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
724   entry->Close();
725 
726   key_buffer[1000] = 'p';
727   key_buffer[3000] = '\0';
728   key = key_buffer;
729   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
730   EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
731   entry->Close();
732 
733   CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
734   key_buffer[19999] = '\0';
735 
736   key = key_buffer;
737   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
738   EXPECT_TRUE(key == entry->GetKey()) << "long key";
739   entry->Close();
740 
741   CacheTestFillBuffer(key_buffer, 0x4000, true);
742   key_buffer[0x4000] = '\0';
743 
744   key = key_buffer;
745   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
746   EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
747   entry->Close();
748 }
749 
750 TEST_F(DiskCacheEntryTest, GetKey) {
751   InitCache();
752   GetKey();
753 }
754 
755 TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
756   SetMemoryOnlyMode();
757   InitCache();
758   GetKey();
759 }
760 
761 void DiskCacheEntryTest::GetTimes() {
762   std::string key("the first key");
763   disk_cache::Entry* entry;
764 
765   Time t1 = Time::Now();
766   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
767   EXPECT_TRUE(entry->GetLastModified() >= t1);
768   EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());
769 
770   AddDelay();
771   Time t2 = Time::Now();
772   EXPECT_TRUE(t2 > t1);
773   EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
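  // In app cache mode the timestamps are not expected to change on writes.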
774   if (type_ == net::APP_CACHE) {
775     EXPECT_TRUE(entry->GetLastModified() < t2);
776   } else {
777     EXPECT_TRUE(entry->GetLastModified() >= t2);
778   }
779   EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());
780 
781   AddDelay();
782   Time t3 = Time::Now();
783   EXPECT_TRUE(t3 > t2);
784   const int kSize = 200;
785   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
786   EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
787   if (type_ == net::APP_CACHE) {
788     EXPECT_TRUE(entry->GetLastUsed() < t2);
789     EXPECT_TRUE(entry->GetLastModified() < t2);
790   } else if (type_ == net::SHADER_CACHE) {
791     EXPECT_TRUE(entry->GetLastUsed() < t3);
792     EXPECT_TRUE(entry->GetLastModified() < t3);
793   } else {
794     EXPECT_TRUE(entry->GetLastUsed() >= t3);
795     EXPECT_TRUE(entry->GetLastModified() < t3);
796   }
797   entry->Close();
798 }
799 
800 TEST_F(DiskCacheEntryTest, GetTimes) {
801   InitCache();
802   GetTimes();
803 }
804 
805 TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
806   SetMemoryOnlyMode();
807   InitCache();
808   GetTimes();
809 }
810 
811 TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
812   SetCacheType(net::APP_CACHE);
813   InitCache();
814   GetTimes();
815 }
816 
817 TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
818   SetCacheType(net::SHADER_CACHE);
819   InitCache();
820   GetTimes();
821 }
822 
823 void DiskCacheEntryTest::GrowData() {
824   std::string key1("the first key");
825   disk_cache::Entry* entry;
826   ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
827 
828   const int kSize = 20000;
829   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
830   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
831   CacheTestFillBuffer(buffer1->data(), kSize, false);
832   memset(buffer2->data(), 0, kSize);
833 
834   base::strlcpy(buffer1->data(), "the data", kSize);
835   EXPECT_EQ(10, WriteData(entry, 0, 0, buffer1.get(), 10, false));
836   EXPECT_EQ(10, ReadData(entry, 0, 0, buffer2.get(), 10));
837   EXPECT_STREQ("the data", buffer2->data());
838   EXPECT_EQ(10, entry->GetDataSize(0));
839 
840   EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
841   EXPECT_EQ(2000, entry->GetDataSize(0));
842   EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
843   EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
844 
845   EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
846   EXPECT_EQ(20000, entry->GetDataSize(0));
847   EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), kSize));
848   EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
849   entry->Close();
850 
851   memset(buffer2->data(), 0, kSize);
852   std::string key2("Second key");
853   ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
854   EXPECT_EQ(10, WriteData(entry, 0, 0, buffer1.get(), 10, false));
855   EXPECT_EQ(10, entry->GetDataSize(0));
856   entry->Close();
857 
858   // Go from an internal address to a bigger block size.
859   ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
860   EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
861   EXPECT_EQ(2000, entry->GetDataSize(0));
862   EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
863   EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
864   entry->Close();
865   memset(buffer2->data(), 0, kSize);
866 
867   // Go from an internal address to an external one.
868   ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
869   EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
870   EXPECT_EQ(20000, entry->GetDataSize(0));
871   EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), kSize));
872   EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
873   entry->Close();
874 
875   // Double check the size from disk.
876   ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
877   EXPECT_EQ(20000, entry->GetDataSize(0));
878 
879   // Now extend the entry without actual data.
880   EXPECT_EQ(0, WriteData(entry, 0, 45500, buffer1.get(), 0, false));
881   entry->Close();
882 
883   // And check again from disk.
884   ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
885   EXPECT_EQ(45500, entry->GetDataSize(0));
886   entry->Close();
887 }
888 
889 TEST_F(DiskCacheEntryTest, GrowData) {
890   InitCache();
891   GrowData();
892 }
893 
894 TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
895   InitCache();
896   cache_impl_->SetFlags(disk_cache::kNoBuffering);
897   GrowData();
898 }
899 
900 TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
901   SetMemoryOnlyMode();
902   InitCache();
903   GrowData();
904 }
905 
906 void DiskCacheEntryTest::TruncateData() {
907   std::string key("the first key");
908   disk_cache::Entry* entry;
909   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
910 
911   const int kSize1 = 20000;
912   const int kSize2 = 20000;
913   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
914   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
915 
916   CacheTestFillBuffer(buffer1->data(), kSize1, false);
917   memset(buffer2->data(), 0, kSize2);
918 
919   // Simple truncation:
920   EXPECT_EQ(200, WriteData(entry, 0, 0, buffer1.get(), 200, false));
921   EXPECT_EQ(200, entry->GetDataSize(0));
922   EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1.get(), 100, false));
923   EXPECT_EQ(200, entry->GetDataSize(0));
924   EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1.get(), 100, true));
925   EXPECT_EQ(100, entry->GetDataSize(0));
926   EXPECT_EQ(0, WriteData(entry, 0, 50, buffer1.get(), 0, true));
927   EXPECT_EQ(50, entry->GetDataSize(0));
928   EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1.get(), 0, true));
929   EXPECT_EQ(0, entry->GetDataSize(0));
930   entry->Close();
931   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
932 
933   // Go to an external file.
934   EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), 20000, true));
935   EXPECT_EQ(20000, entry->GetDataSize(0));
936   EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), 20000));
937   EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
938   memset(buffer2->data(), 0, kSize2);
939 
940   // External file truncation
941   EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1.get(), 18000, false));
942   EXPECT_EQ(20000, entry->GetDataSize(0));
943   EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1.get(), 18000, true));
944   EXPECT_EQ(18000, entry->GetDataSize(0));
945   EXPECT_EQ(0, WriteData(entry, 0, 17500, buffer1.get(), 0, true));
946   EXPECT_EQ(17500, entry->GetDataSize(0));
947 
948   // And back to an internal block.
949   EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1.get(), 600, true));
950   EXPECT_EQ(1600, entry->GetDataSize(0));
951   EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer2.get(), 600));
952   EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
953   EXPECT_EQ(1000, ReadData(entry, 0, 0, buffer2.get(), 1000));
954   EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
955       << "Preserves previous data";
956 
957   // Go from external file to zero length.
958   EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), 20000, true));
959   EXPECT_EQ(20000, entry->GetDataSize(0));
960   EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1.get(), 0, true));
961   EXPECT_EQ(0, entry->GetDataSize(0));
962 
963   entry->Close();
964 }
965 
966 TEST_F(DiskCacheEntryTest, TruncateData) {
967   InitCache();
968   TruncateData();
969 }
970 
971 TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
972   InitCache();
973   cache_impl_->SetFlags(disk_cache::kNoBuffering);
974   TruncateData();
975 }
976 
977 TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
978   SetMemoryOnlyMode();
979   InitCache();
980   TruncateData();
981 }
982 
983 void DiskCacheEntryTest::ZeroLengthIO() {
984   std::string key("the first key");
985   disk_cache::Entry* entry;
986   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
987 
988   EXPECT_EQ(0, ReadData(entry, 0, 0, NULL, 0));
989   EXPECT_EQ(0, WriteData(entry, 0, 0, NULL, 0, false));
990 
991   // This write should extend the entry.
992   EXPECT_EQ(0, WriteData(entry, 0, 1000, NULL, 0, false));
993   EXPECT_EQ(0, ReadData(entry, 0, 500, NULL, 0));
994   EXPECT_EQ(0, ReadData(entry, 0, 2000, NULL, 0));
995   EXPECT_EQ(1000, entry->GetDataSize(0));
996 
997   EXPECT_EQ(0, WriteData(entry, 0, 100000, NULL, 0, true));
998   EXPECT_EQ(0, ReadData(entry, 0, 50000, NULL, 0));
999   EXPECT_EQ(100000, entry->GetDataSize(0));
1000 
1001   // Let's verify the actual content.
1002   const int kSize = 20;
1003   const char zeros[kSize] = {};
1004   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1005 
1006   CacheTestFillBuffer(buffer->data(), kSize, false);
1007   EXPECT_EQ(kSize, ReadData(entry, 0, 500, buffer.get(), kSize));
1008   EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1009 
1010   CacheTestFillBuffer(buffer->data(), kSize, false);
1011   EXPECT_EQ(kSize, ReadData(entry, 0, 5000, buffer.get(), kSize));
1012   EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1013 
1014   CacheTestFillBuffer(buffer->data(), kSize, false);
1015   EXPECT_EQ(kSize, ReadData(entry, 0, 50000, buffer.get(), kSize));
1016   EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1017 
1018   entry->Close();
1019 }
1020 
1021 TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
1022   InitCache();
1023   ZeroLengthIO();
1024 }
1025 
1026 TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
1027   InitCache();
1028   cache_impl_->SetFlags(disk_cache::kNoBuffering);
1029   ZeroLengthIO();
1030 }
1031 
1032 TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
1033   SetMemoryOnlyMode();
1034   InitCache();
1035   ZeroLengthIO();
1036 }
1037 
1038 // Tests that we handle the content correctly when buffering, a feature of the
1039 // standard cache that permits fast responses to certain reads.
1040 void DiskCacheEntryTest::Buffering() {
1041   std::string key("the first key");
1042   disk_cache::Entry* entry;
1043   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1044 
1045   const int kSize = 200;
1046   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1047   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
1048   CacheTestFillBuffer(buffer1->data(), kSize, true);
1049   CacheTestFillBuffer(buffer2->data(), kSize, true);
1050 
1051   EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
1052   entry->Close();
1053 
1054   // Write a little more and read what we wrote before.
1055   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1056   EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
1057   EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
1058   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1059 
1060   // Now go to an external file.
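  // A write at this offset grows stream 1 past what the internal block files
  // can hold, so its data moves to a separate external file.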
1061   EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
1062   entry->Close();
1063 
1064   // Write something else and verify old data.
1065   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1066   EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
1067   CacheTestFillBuffer(buffer2->data(), kSize, true);
1068   EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
1069   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1070   CacheTestFillBuffer(buffer2->data(), kSize, true);
1071   EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
1072   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1073   CacheTestFillBuffer(buffer2->data(), kSize, true);
1074   EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
1075   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1076 
1077   // Extend the file some more.
1078   EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
1079   entry->Close();
1080 
1081   // And now make sure that we can deal with data in both places (ram/disk).
1082   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1083   EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));
1084 
1085   // We should not overwrite the data at 18000 with this.
1086   EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
1087   CacheTestFillBuffer(buffer2->data(), kSize, true);
1088   EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
1089   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1090   CacheTestFillBuffer(buffer2->data(), kSize, true);
1091   EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
1092   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1093 
1094   EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
1095   CacheTestFillBuffer(buffer2->data(), kSize, true);
1096   EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
1097   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));
1098 
1099   CacheTestFillBuffer(buffer2->data(), kSize, true);
1100   EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
1101   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));
1102 
1103   // Extend the file again and read before without closing the entry.
1104   EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
1105   EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
1106   CacheTestFillBuffer(buffer2->data(), kSize, true);
1107   EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
1108   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1109   CacheTestFillBuffer(buffer2->data(), kSize, true);
1110   EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
1111   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1112 
1113   entry->Close();
1114 }
1115 
1116 TEST_F(DiskCacheEntryTest, Buffering) {
1117   InitCache();
1118   Buffering();
1119 }
1120 
1121 TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
1122   InitCache();
1123   cache_impl_->SetFlags(disk_cache::kNoBuffering);
1124   Buffering();
1125 }
1126 
1127 // Checks that entries are zero length when created.
1128 void DiskCacheEntryTest::SizeAtCreate() {
1129   const char key[]  = "the first key";
1130   disk_cache::Entry* entry;
1131   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1132 
1133   const int kNumStreams = 3;
1134   for (int i = 0; i < kNumStreams; ++i)
1135     EXPECT_EQ(0, entry->GetDataSize(i));
1136   entry->Close();
1137 }
1138 
1139 TEST_F(DiskCacheEntryTest, SizeAtCreate) {
1140   InitCache();
1141   SizeAtCreate();
1142 }
1143 
1144 TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
1145   SetMemoryOnlyMode();
1146   InitCache();
1147   SizeAtCreate();
1148 }
1149 
1150 // Some extra tests to make sure that buffering works properly when changing
1151 // the entry size.
1152 void DiskCacheEntryTest::SizeChanges() {
1153   std::string key("the first key");
1154   disk_cache::Entry* entry;
1155   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1156 
1157   const int kSize = 200;
1158   const char zeros[kSize] = {};
1159   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1160   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
1161   CacheTestFillBuffer(buffer1->data(), kSize, true);
1162   CacheTestFillBuffer(buffer2->data(), kSize, true);
1163 
1164   EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, true));
1165   EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, true));
1166   EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, true));
1167   entry->Close();
1168 
1169   // Extend the file and read between the old size and the new write.
1170   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1171   EXPECT_EQ(23000 + kSize, entry->GetDataSize(1));
1172   EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, true));
1173   EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
1174   EXPECT_EQ(kSize, ReadData(entry, 1, 24000, buffer2.get(), kSize));
1175   EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));
1176 
1177   // Read at the end of the old file size.
1178   EXPECT_EQ(kSize,
1179             ReadData(entry, 1, 23000 + kSize - 35, buffer2.get(), kSize));
1180   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));
1181 
1182   // Read slightly before the last write.
1183   CacheTestFillBuffer(buffer2->data(), kSize, true);
1184   EXPECT_EQ(kSize, ReadData(entry, 1, 24900, buffer2.get(), kSize));
1185   EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1186   EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1187 
1188   // Extend the entry a little more.
1189   EXPECT_EQ(kSize, WriteData(entry, 1, 26000, buffer1.get(), kSize, true));
1190   EXPECT_EQ(26000 + kSize, entry->GetDataSize(1));
1191   CacheTestFillBuffer(buffer2->data(), kSize, true);
1192   EXPECT_EQ(kSize, ReadData(entry, 1, 25900, buffer2.get(), kSize));
1193   EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1194   EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1195 
1196   // And now reduce the size.
1197   EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, true));
1198   EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
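  // Only 28 bytes remain between this read offset and the end of the entry.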
1199   EXPECT_EQ(28, ReadData(entry, 1, 25000 + kSize - 28, buffer2.get(), kSize));
1200   EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));
1201 
1202   // Reduce the size with a buffer that is not extending the size.
1203   EXPECT_EQ(kSize, WriteData(entry, 1, 24000, buffer1.get(), kSize, false));
1204   EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
1205   EXPECT_EQ(kSize, WriteData(entry, 1, 24500, buffer1.get(), kSize, true));
1206   EXPECT_EQ(24500 + kSize, entry->GetDataSize(1));
1207   EXPECT_EQ(kSize, ReadData(entry, 1, 23900, buffer2.get(), kSize));
1208   EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1209   EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1210 
1211   // And now reduce the size below the old size.
1212   EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, true));
1213   EXPECT_EQ(19000 + kSize, entry->GetDataSize(1));
1214   EXPECT_EQ(kSize, ReadData(entry, 1, 18900, buffer2.get(), kSize));
1215   EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1216   EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1217 
1218   // Verify that the actual file is truncated.
1219   entry->Close();
1220   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1221   EXPECT_EQ(19000 + kSize, entry->GetDataSize(1));
1222 
1223   // Extend the newly opened file with a zero length write, expect zero fill.
1224   EXPECT_EQ(0, WriteData(entry, 1, 20000 + kSize, buffer1.get(), 0, false));
1225   EXPECT_EQ(kSize, ReadData(entry, 1, 19000 + kSize, buffer1.get(), kSize));
1226   EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));
1227 
1228   entry->Close();
1229 }
1230 
1231 TEST_F(DiskCacheEntryTest, SizeChanges) {
1232   InitCache();
1233   SizeChanges();
1234 }
1235 
1236 TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
1237   InitCache();
1238   cache_impl_->SetFlags(disk_cache::kNoBuffering);
1239   SizeChanges();
1240 }
1241 
1242 // Write more than the total cache capacity but to a single entry. |size| is the
1243 // number of bytes to write each time.
1244 void DiskCacheEntryTest::ReuseEntry(int size) {
1245   std::string key1("the first key");
1246   disk_cache::Entry* entry;
1247   ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
1248 
1249   entry->Close();
1250   std::string key2("the second key");
1251   ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
1252 
1253   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
1254   CacheTestFillBuffer(buffer->data(), size, false);
1255 
1256   for (int i = 0; i < 15; i++) {
1257     EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
1258     EXPECT_EQ(size, WriteData(entry, 0, 0, buffer.get(), size, false));
1259     entry->Close();
1260     ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
1261   }
1262 
1263   entry->Close();
1264   ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
1265   entry->Close();
1266 }
1267 
1268 TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
1269   SetMaxSize(200 * 1024);
1270   InitCache();
1271   ReuseEntry(20 * 1024);
1272 }
1273 
1274 TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
1275   SetMemoryOnlyMode();
1276   SetMaxSize(200 * 1024);
1277   InitCache();
1278   ReuseEntry(20 * 1024);
1279 }
1280 
1281 TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
1282   SetMaxSize(100 * 1024);
1283   InitCache();
1284   ReuseEntry(10 * 1024);
1285 }
1286 
1287 TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
1288   SetMemoryOnlyMode();
1289   SetMaxSize(100 * 1024);
1290   InitCache();
1291   ReuseEntry(10 * 1024);
1292 }
1293 
1294 // Reading somewhere that was not written should return zeros.
1295 void DiskCacheEntryTest::InvalidData() {
1296   std::string key("the first key");
1297   disk_cache::Entry* entry;
1298   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1299 
1300   const int kSize1 = 20000;
1301   const int kSize2 = 20000;
1302   const int kSize3 = 20000;
1303   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
1304   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
1305   scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
1306 
1307   CacheTestFillBuffer(buffer1->data(), kSize1, false);
1308   memset(buffer2->data(), 0, kSize2);
1309 
1310   // Simple data grow:
1311   EXPECT_EQ(200, WriteData(entry, 0, 400, buffer1.get(), 200, false));
1312   EXPECT_EQ(600, entry->GetDataSize(0));
1313   EXPECT_EQ(100, ReadData(entry, 0, 300, buffer3.get(), 100));
1314   EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1315   entry->Close();
1316   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1317 
1318   // The entry is now on disk. Load it and extend it.
1319   EXPECT_EQ(200, WriteData(entry, 0, 800, buffer1.get(), 200, false));
1320   EXPECT_EQ(1000, entry->GetDataSize(0));
1321   EXPECT_EQ(100, ReadData(entry, 0, 700, buffer3.get(), 100));
1322   EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1323   entry->Close();
1324   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1325 
1326   // This time using truncate.
1327   EXPECT_EQ(200, WriteData(entry, 0, 1800, buffer1.get(), 200, true));
1328   EXPECT_EQ(2000, entry->GetDataSize(0));
1329   EXPECT_EQ(100, ReadData(entry, 0, 1500, buffer3.get(), 100));
1330   EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1331 
1332   // Go to an external file.
1333   EXPECT_EQ(200, WriteData(entry, 0, 19800, buffer1.get(), 200, false));
1334   EXPECT_EQ(20000, entry->GetDataSize(0));
1335   EXPECT_EQ(4000, ReadData(entry, 0, 14000, buffer3.get(), 4000));
1336   EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));
1337 
1338   // And back to an internal block.
1339   EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1.get(), 600, true));
1340   EXPECT_EQ(1600, entry->GetDataSize(0));
1341   EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer3.get(), 600));
1342   EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));
1343 
1344   // Extend it again.
1345   EXPECT_EQ(600, WriteData(entry, 0, 2000, buffer1.get(), 600, false));
1346   EXPECT_EQ(2600, entry->GetDataSize(0));
1347   EXPECT_EQ(200, ReadData(entry, 0, 1800, buffer3.get(), 200));
1348   EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
1349 
1350   // And again (with truncation flag).
1351   EXPECT_EQ(600, WriteData(entry, 0, 3000, buffer1.get(), 600, true));
1352   EXPECT_EQ(3600, entry->GetDataSize(0));
1353   EXPECT_EQ(200, ReadData(entry, 0, 2800, buffer3.get(), 200));
1354   EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
1355 
1356   entry->Close();
1357 }
1358 
1359 TEST_F(DiskCacheEntryTest, InvalidData) {
1360   InitCache();
1361   InvalidData();
1362 }
1363 
1364 TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
1365   InitCache();
1366   cache_impl_->SetFlags(disk_cache::kNoBuffering);
1367   InvalidData();
1368 }
1369 
1370 TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
1371   SetMemoryOnlyMode();
1372   InitCache();
1373   InvalidData();
1374 }
1375 
1376 // Tests that the cache preserves the buffer of an IO operation.
1377 void DiskCacheEntryTest::ReadWriteDestroyBuffer() {
1378   std::string key("the first key");
1379   disk_cache::Entry* entry;
1380   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1381 
1382   const int kSize = 200;
1383   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1384   CacheTestFillBuffer(buffer->data(), kSize, false);
1385 
1386   net::TestCompletionCallback cb;
1387   EXPECT_EQ(net::ERR_IO_PENDING,
1388             entry->WriteData(0, 0, buffer.get(), kSize, cb.callback(), false));
1389 
1390   // Release our reference to the buffer.
1391   buffer = NULL;
1392   EXPECT_EQ(kSize, cb.WaitForResult());
1393 
1394   // And now test with a Read().
1395   buffer = new net::IOBuffer(kSize);
1396   CacheTestFillBuffer(buffer->data(), kSize, false);
1397 
1398   EXPECT_EQ(net::ERR_IO_PENDING,
1399             entry->ReadData(0, 0, buffer.get(), kSize, cb.callback()));
1400   buffer = NULL;
1401   EXPECT_EQ(kSize, cb.WaitForResult());
1402 
1403   entry->Close();
1404 }
1405 
1406 TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
1407   InitCache();
1408   ReadWriteDestroyBuffer();
1409 }
1410 
1411 void DiskCacheEntryTest::DoomNormalEntry() {
1412   std::string key("the first key");
1413   disk_cache::Entry* entry;
1414   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1415   entry->Doom();
1416   entry->Close();
1417 
1418   const int kSize = 20000;
1419   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1420   CacheTestFillBuffer(buffer->data(), kSize, true);
1421   buffer->data()[19999] = '\0';
1422 
1423   key = buffer->data();
1424   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1425   EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1426   EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
1427   entry->Doom();
1428   entry->Close();
1429 
1430   FlushQueueForTest();
1431   EXPECT_EQ(0, cache_->GetEntryCount());
1432 }
1433 
1434 TEST_F(DiskCacheEntryTest, DoomEntry) {
1435   InitCache();
1436   DoomNormalEntry();
1437 }
1438 
1439 TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
1440   SetMemoryOnlyMode();
1441   InitCache();
1442   DoomNormalEntry();
1443 }
1444 
1445 // Tests dooming an entry that's linked to an open entry.
1446 void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
1447   disk_cache::Entry* entry1;
1448   disk_cache::Entry* entry2;
1449   ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1));
1450   entry1->Close();
1451   ASSERT_EQ(net::OK, CreateEntry("foo", &entry1));
1452   entry1->Close();
1453   ASSERT_EQ(net::OK, CreateEntry("bar", &entry1));
1454   entry1->Close();
1455 
1456   ASSERT_EQ(net::OK, OpenEntry("foo", &entry1));
1457   ASSERT_EQ(net::OK, OpenEntry("bar", &entry2));
1458   entry2->Doom();
1459   entry2->Close();
1460 
1461   ASSERT_EQ(net::OK, OpenEntry("foo", &entry2));
1462   entry2->Doom();
1463   entry2->Close();
1464   entry1->Close();
1465 
1466   ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1));
1467   entry1->Close();
1468 }
1469 
1470 TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
1471   InitCache();
1472   DoomEntryNextToOpenEntry();
1473 }
1474 
1475 TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
1476   SetNewEviction();
1477   InitCache();
1478   DoomEntryNextToOpenEntry();
1479 }
1480 
1481 TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
1482   SetCacheType(net::APP_CACHE);
1483   InitCache();
1484   DoomEntryNextToOpenEntry();
1485 }
1486 
1487 // Verify that basic operations work as expected with doomed entries.
1488 void DiskCacheEntryTest::DoomedEntry() {
1489   std::string key("the first key");
1490   disk_cache::Entry* entry;
1491   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1492   entry->Doom();
1493 
1494   FlushQueueForTest();
1495   EXPECT_EQ(0, cache_->GetEntryCount());
1496   Time initial = Time::Now();
1497   AddDelay();
1498 
1499   const int kSize1 = 2000;
1500   const int kSize2 = 2000;
1501   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
1502   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
1503   CacheTestFillBuffer(buffer1->data(), kSize1, false);
1504   memset(buffer2->data(), 0, kSize2);
1505 
1506   EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
1507   EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
1508   EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
1509   EXPECT_EQ(key, entry->GetKey());
1510   EXPECT_TRUE(initial < entry->GetLastModified());
1511   EXPECT_TRUE(initial < entry->GetLastUsed());
1512 
1513   entry->Close();
1514 }
1515 
1516 TEST_F(DiskCacheEntryTest, DoomedEntry) {
1517   InitCache();
1518   DoomedEntry();
1519 }
1520 
1521 TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
1522   SetMemoryOnlyMode();
1523   InitCache();
1524   DoomedEntry();
1525 }
1526 
1527 // Tests that we discard entries if the data is missing.
1528 TEST_F(DiskCacheEntryTest, MissingData) {
1529   InitCache();
1530 
1531   std::string key("the first key");
1532   disk_cache::Entry* entry;
1533   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1534 
1535   // Write to an external file.
1536   const int kSize = 20000;
1537   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1538   CacheTestFillBuffer(buffer->data(), kSize, false);
1539   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1540   entry->Close();
1541   FlushQueueForTest();
1542 
1543   disk_cache::Addr address(0x80000001);
1544   base::FilePath name = cache_impl_->GetFileName(address);
1545   EXPECT_TRUE(base::DeleteFile(name, false));
1546 
1547   // Attempt to read the data.
1548   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1549   EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
1550             ReadData(entry, 0, 0, buffer.get(), kSize));
1551   entry->Close();
1552 
1553   // The entry should be gone.
1554   ASSERT_NE(net::OK, OpenEntry(key, &entry));
1555 }
1556 
1557 // Test that child entries in a memory cache backend are not visible from
1558 // enumerations.
1559 TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
1560   SetMemoryOnlyMode();
1561   InitCache();
1562 
1563   const int kSize = 4096;
1564   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1565   CacheTestFillBuffer(buf->data(), kSize, false);
1566 
1567   std::string key("the first key");
1568   disk_cache::Entry* parent_entry;
1569   ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry));
1570 
1571   // Writes to the parent entry.
1572   EXPECT_EQ(kSize,
1573             parent_entry->WriteSparseData(
1574                 0, buf.get(), kSize, net::CompletionCallback()));
1575 
1576   // This write creates a child entry and writes to it.
1577   EXPECT_EQ(kSize,
1578             parent_entry->WriteSparseData(
1579                 8192, buf.get(), kSize, net::CompletionCallback()));
1580 
1581   parent_entry->Close();
1582 
1583   // Perform the enumerations.
1584   void* iter = NULL;
1585   disk_cache::Entry* entry = NULL;
1586   int count = 0;
1587   while (OpenNextEntry(&iter, &entry) == net::OK) {
1588     ASSERT_TRUE(entry != NULL);
1589     ++count;
1590     disk_cache::MemEntryImpl* mem_entry =
1591         reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
1592     EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
1593     mem_entry->Close();
1594   }
1595   EXPECT_EQ(1, count);
1596 }
1597 
1598 // Writes |buf_1| at |offset| and reads it back into |buf_2|.
1599 void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
1600                     net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) {
1601   net::TestCompletionCallback cb;
1602 
1603   memset(buf_2->data(), 0, size);
1604   int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1605   EXPECT_EQ(0, cb.GetResult(ret));
1606 
1607   ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
1608   EXPECT_EQ(size, cb.GetResult(ret));
1609 
1610   ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1611   EXPECT_EQ(size, cb.GetResult(ret));
1612 
1613   EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
1614 }
1615 
1616 // Reads |size| bytes from |entry| at |offset| and verifies that they are the
1617 // same as the content of the provided |buffer|.
1618 void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
1619                            int size) {
1620   net::TestCompletionCallback cb;
1621 
1622   scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size));
1623   memset(buf_1->data(), 0, size);
1624   int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
1625   EXPECT_EQ(size, cb.GetResult(ret));
1626   EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
1627 }
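// Together these helpers encode the round-trip pattern used by the sparse
// tests below: VerifySparseIO() first checks that the target range is still
// empty (the initial read is expected to return 0), then writes |buf_1| and
// reads it back, while VerifyContentSparseIO() re-checks the persisted
// content, typically after the entry has been closed and reopened.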
1628 
1629 void DiskCacheEntryTest::BasicSparseIO() {
1630   std::string key("the first key");
1631   disk_cache::Entry* entry;
1632   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1633 
1634   const int kSize = 2048;
1635   scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1636   scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1637   CacheTestFillBuffer(buf_1->data(), kSize, false);
1638 
1639   // Write at offset 0.
1640   VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());
1641 
1642   // Write at offset 0x400000 (4 MB).
1643   VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());
1644 
1645   // Write at offset 0x800000000 (32 GB).
1646   VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());
1647 
1648   entry->Close();
1649 
1650   // Check everything again.
1651   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1652   VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
1653   VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
1654   VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
1655   entry->Close();
1656 }
1657 
1658 TEST_F(DiskCacheEntryTest, BasicSparseIO) {
1659   InitCache();
1660   BasicSparseIO();
1661 }
1662 
1663 TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
1664   SetMemoryOnlyMode();
1665   InitCache();
1666   BasicSparseIO();
1667 }
1668 
1669 void DiskCacheEntryTest::HugeSparseIO() {
1670   std::string key("the first key");
1671   disk_cache::Entry* entry;
1672   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1673 
1674   // Write 1.2 MB so that we cover multiple entries.
1675   const int kSize = 1200 * 1024;
1676   scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1677   scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1678   CacheTestFillBuffer(buf_1->data(), kSize, false);
1679 
1680   // Write at offset 0x20F0000 (33 MB - 64 KB).
1681   VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
1682   entry->Close();
1683 
1684   // Check it again.
1685   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1686   VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
1687   entry->Close();
1688 }
1689 
1690 TEST_F(DiskCacheEntryTest, HugeSparseIO) {
1691   InitCache();
1692   HugeSparseIO();
1693 }
1694 
1695 TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
1696   SetMemoryOnlyMode();
1697   InitCache();
1698   HugeSparseIO();
1699 }
1700 
1701 void DiskCacheEntryTest::GetAvailableRange() {
1702   std::string key("the first key");
1703   disk_cache::Entry* entry;
1704   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1705 
1706   const int kSize = 16 * 1024;
1707   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1708   CacheTestFillBuffer(buf->data(), kSize, false);
1709 
1710   // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
1711   EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
1712   EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));
1713 
1714   // We stop at the first empty block.
1715   int64 start;
1716   net::TestCompletionCallback cb;
1717   int rv = entry->GetAvailableRange(
1718       0x20F0000, kSize * 2, &start, cb.callback());
1719   EXPECT_EQ(kSize, cb.GetResult(rv));
1720   EXPECT_EQ(0x20F0000, start);
1721 
1722   start = 0;
1723   rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
1724   EXPECT_EQ(0, cb.GetResult(rv));
1725   rv = entry->GetAvailableRange(
1726       0x20F0000 - kSize, kSize, &start, cb.callback());
1727   EXPECT_EQ(0, cb.GetResult(rv));
1728   rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
1729   EXPECT_EQ(kSize, cb.GetResult(rv));
1730   EXPECT_EQ(0x20F0000, start);
1731 
1732   // We should be able to Read based on the results of GetAvailableRange.
1733   start = -1;
1734   rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
1735   EXPECT_EQ(0, cb.GetResult(rv));
1736   rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
1737   EXPECT_EQ(0, cb.GetResult(rv));
1738 
1739   start = 0;
1740   rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
1741   EXPECT_EQ(0x2000, cb.GetResult(rv));
1742   EXPECT_EQ(0x20F2000, start);
1743   EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));
1744 
1745   // Make sure that we respect the |len| argument.
1746   start = 0;
1747   rv = entry->GetAvailableRange(
1748       0x20F0001 - kSize, kSize, &start, cb.callback());
1749   EXPECT_EQ(1, cb.GetResult(rv));
1750   EXPECT_EQ(0x20F0000, start);
1751 
1752   entry->Close();
1753 }
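// The assertions above spell out the GetAvailableRange() contract: the
// callback reports how many contiguous cached bytes exist at or after the
// queried offset (capped by |len|), and |start| receives where that run
// begins. Below is a minimal, illustrative sketch (not part of the original
// file) of how a caller could walk every cached run inside
// [offset, offset + len) using only that contract; the helper name is
// hypothetical and error handling is omitted.
void ReadAllAvailableRanges(disk_cache::Entry* entry, int64 offset, int len) {
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(len));
  net::TestCompletionCallback cb;
  const int64 end = offset + len;
  while (offset < end) {
    int64 start = 0;
    int rv = entry->GetAvailableRange(
        offset, static_cast<int>(end - offset), &start, cb.callback());
    int available = cb.GetResult(rv);
    if (available <= 0)
      break;  // No more cached data inside the requested window.
    // Read exactly the bytes GetAvailableRange() reported as present.
    rv = entry->ReadSparseData(start, buf.get(), available, cb.callback());
    EXPECT_EQ(available, cb.GetResult(rv));
    offset = start + available;
  }
}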
1754 
1755 TEST_F(DiskCacheEntryTest, GetAvailableRange) {
1756   InitCache();
1757   GetAvailableRange();
1758 }
1759 
1760 TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
1761   SetMemoryOnlyMode();
1762   InitCache();
1763   GetAvailableRange();
1764 }
1765 
1766 void DiskCacheEntryTest::CouldBeSparse() {
1767   std::string key("the first key");
1768   disk_cache::Entry* entry;
1769   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1770 
1771   const int kSize = 16 * 1024;
1772   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1773   CacheTestFillBuffer(buf->data(), kSize, false);
1774 
1775   // Write at offset 0x20F0000 (33 MB - 64 KB).
1776   EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
1777 
1778   EXPECT_TRUE(entry->CouldBeSparse());
1779   entry->Close();
1780 
1781   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1782   EXPECT_TRUE(entry->CouldBeSparse());
1783   entry->Close();
1784 
1785   // Now verify a regular entry.
1786   key.assign("another key");
1787   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1788   EXPECT_FALSE(entry->CouldBeSparse());
1789 
1790   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
1791   EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
1792   EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));
1793 
1794   EXPECT_FALSE(entry->CouldBeSparse());
1795   entry->Close();
1796 
1797   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1798   EXPECT_FALSE(entry->CouldBeSparse());
1799   entry->Close();
1800 }
1801 
1802 TEST_F(DiskCacheEntryTest, CouldBeSparse) {
1803   InitCache();
1804   CouldBeSparse();
1805 }
1806 
1807 TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
1808   SetMemoryOnlyMode();
1809   InitCache();
1810   CouldBeSparse();
1811 }
1812 
1813 TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
1814   SetMemoryOnlyMode();
1815   InitCache();
1816 
1817   const int kSize = 8192;
1818   scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1819   scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1820   CacheTestFillBuffer(buf_1->data(), kSize, false);
1821 
1822   std::string key("the first key");
1823   disk_cache::Entry* entry;
1824   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1825 
1826   // This loop performs back-to-back 1 KB writes starting at offsets 0 and 9000.
1827   for (int i = 0; i < kSize; i += 1024) {
1828     scoped_refptr<net::WrappedIOBuffer> buf_3(
1829       new net::WrappedIOBuffer(buf_1->data() + i));
1830     VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
1831     VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
1832   }
1833 
1834   // Make sure we have data written.
1835   VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
1836   VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);
1837 
1838   // This tests a large write that spans 3 entries from a misaligned offset.
1839   VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());
1840 
1841   entry->Close();
1842 }
1843 
1844 TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
1845   SetMemoryOnlyMode();
1846   InitCache();
1847 
1848   const int kSize = 8192;
1849   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1850   CacheTestFillBuffer(buf->data(), kSize, false);
1851 
1852   disk_cache::Entry* entry;
1853   std::string key("the first key");
1854   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1855 
1856   // Writes in the middle of an entry.
1857   EXPECT_EQ(
1858       1024,
1859       entry->WriteSparseData(0, buf.get(), 1024, net::CompletionCallback()));
1860   EXPECT_EQ(
1861       1024,
1862       entry->WriteSparseData(5120, buf.get(), 1024, net::CompletionCallback()));
1863   EXPECT_EQ(1024,
1864             entry->WriteSparseData(
1865                 10000, buf.get(), 1024, net::CompletionCallback()));
1866 
1867   // Writes in the middle of an entry and spans 2 child entries.
1868   EXPECT_EQ(8192,
1869             entry->WriteSparseData(
1870                 50000, buf.get(), 8192, net::CompletionCallback()));
1871 
1872   int64 start;
1873   net::TestCompletionCallback cb;
1874   // Test that we stop at a discontinuous child at the second block.
1875   int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
1876   EXPECT_EQ(1024, cb.GetResult(rv));
1877   EXPECT_EQ(0, start);
1878 
1879   // Test that number of bytes is reported correctly when we start from the
1880   // middle of a filled region.
1881   rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
1882   EXPECT_EQ(512, cb.GetResult(rv));
1883   EXPECT_EQ(512, start);
1884 
1885   // Test that we found bytes in the child of next block.
1886   rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
1887   EXPECT_EQ(1024, cb.GetResult(rv));
1888   EXPECT_EQ(5120, start);
1889 
1890   // Test that the desired length is respected. It starts within a filled
1891   // region.
1892   rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
1893   EXPECT_EQ(512, cb.GetResult(rv));
1894   EXPECT_EQ(5500, start);
1895 
1896   // Test that the desired length is respected. It starts before a filled
1897   // region.
1898   rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
1899   EXPECT_EQ(500, cb.GetResult(rv));
1900   EXPECT_EQ(5120, start);
1901 
1902   // Test that multiple blocks are scanned.
1903   rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
1904   EXPECT_EQ(8192, cb.GetResult(rv));
1905   EXPECT_EQ(50000, start);
1906 
1907   entry->Close();
1908 }
1909 
1910 void DiskCacheEntryTest::UpdateSparseEntry() {
1911   std::string key("the first key");
1912   disk_cache::Entry* entry1;
1913   ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1914 
1915   const int kSize = 2048;
1916   scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1917   scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1918   CacheTestFillBuffer(buf_1->data(), kSize, false);
1919 
1920   // Write at offset 0.
1921   VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
1922   entry1->Close();
1923 
1924   // Write at offset 2048.
1925   ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1926   VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());
1927 
1928   disk_cache::Entry* entry2;
1929   ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2));
1930 
1931   entry1->Close();
1932   entry2->Close();
1933   FlushQueueForTest();
1934   if (memory_only_ || simple_cache_mode_)
1935     EXPECT_EQ(2, cache_->GetEntryCount());
1936   else
1937     EXPECT_EQ(3, cache_->GetEntryCount());
1938 }
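// The differing expectations above reflect how the sparse data is stored:
// with the blockfile backend the sparse payload of "the first key" apparently
// lives in a separate child entry that GetEntryCount() includes (parent +
// child + "the second key" = 3), while the memory-only and simple backends
// only report the two entries created explicitly.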
1939 
1940 TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
1941   SetCacheType(net::MEDIA_CACHE);
1942   InitCache();
1943   UpdateSparseEntry();
1944 }
1945 
1946 TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
1947   SetMemoryOnlyMode();
1948   SetCacheType(net::MEDIA_CACHE);
1949   InitCache();
1950   UpdateSparseEntry();
1951 }
1952 
1953 void DiskCacheEntryTest::DoomSparseEntry() {
1954   std::string key1("the first key");
1955   std::string key2("the second key");
1956   disk_cache::Entry *entry1, *entry2;
1957   ASSERT_EQ(net::OK, CreateEntry(key1, &entry1));
1958   ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1959 
1960   const int kSize = 4 * 1024;
1961   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1962   CacheTestFillBuffer(buf->data(), kSize, false);
1963 
1964   int64 offset = 1024;
1965   // Write to a bunch of ranges.
1966   for (int i = 0; i < 12; i++) {
1967     EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
1968     // Keep the second map under the default size.
1969     if (i < 9)
1970       EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));
1971 
1972     offset *= 4;
1973   }
1974 
1975   if (memory_only_ || simple_cache_mode_)
1976     EXPECT_EQ(2, cache_->GetEntryCount());
1977   else
1978     EXPECT_EQ(15, cache_->GetEntryCount());
1979 
1980   // Doom the first entry while it's still open.
1981   entry1->Doom();
1982   entry1->Close();
1983   entry2->Close();
1984 
1985   // Doom the second entry after it's fully saved.
1986   EXPECT_EQ(net::OK, DoomEntry(key2));
1987 
1988   // Make sure we do all needed work. This may fail for entry2 if between Close
1989   // and DoomEntry the system decides to remove all traces of the file from the
1990   // system cache so we don't see that there is pending IO.
1991   base::MessageLoop::current()->RunUntilIdle();
1992 
1993   if (memory_only_) {
1994     EXPECT_EQ(0, cache_->GetEntryCount());
1995   } else {
1996     if (5 == cache_->GetEntryCount()) {
1997       // Most likely we are waiting for the result of reading the sparse info
1998       // (it's always async on Posix so it is easy to miss). Unfortunately we
1999       // don't have any signal to watch for so we can only wait.
2000       base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
2001       base::MessageLoop::current()->RunUntilIdle();
2002     }
2003     EXPECT_EQ(0, cache_->GetEntryCount());
2004   }
2005 }
2006 
2007 TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
2008   UseCurrentThread();
2009   InitCache();
2010   DoomSparseEntry();
2011 }
2012 
2013 TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
2014   SetMemoryOnlyMode();
2015   InitCache();
2016   DoomSparseEntry();
2017 }
2018 
2019 // A CompletionCallback wrapper that deletes the cache from within the callback.
2020 // The way a CompletionCallback works means that all tasks (even new ones)
2021 // are executed by the message loop before returning to the caller so the only
2022 // way to simulate a race is to execute what we want on the callback.
2023 class SparseTestCompletionCallback: public net::TestCompletionCallback {
2024  public:
2025   explicit SparseTestCompletionCallback(scoped_ptr<disk_cache::Backend> cache)
2026       : cache_(cache.Pass()) {
2027   }
2028 
2029  private:
2030   virtual void SetResult(int result) OVERRIDE {
2031     cache_.reset();
2032     TestCompletionCallback::SetResult(result);
2033   }
2034 
2035   scoped_ptr<disk_cache::Backend> cache_;
2036   DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback);
2037 };
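// The wrapper takes ownership of the backend via Pass(), so the cache is
// destroyed inside SetResult(), i.e. at the exact moment the pending doom
// operation reports completion; that is the race the comment above is
// simulating.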
2038 
2039 // Tests that we don't crash when the backend is deleted while we are working
2040 // deleting the sub-entries of a sparse entry.
2041 TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
2042   UseCurrentThread();
2043   InitCache();
2044   std::string key("the key");
2045   disk_cache::Entry* entry;
2046   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2047 
2048   const int kSize = 4 * 1024;
2049   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
2050   CacheTestFillBuffer(buf->data(), kSize, false);
2051 
2052   int64 offset = 1024;
2053   // Write to a bunch of ranges.
2054   for (int i = 0; i < 12; i++) {
2055     EXPECT_EQ(kSize,
2056               entry->WriteSparseData(
2057                   offset, buf.get(), kSize, net::CompletionCallback()));
2058     offset *= 4;
2059   }
2060   EXPECT_EQ(9, cache_->GetEntryCount());
2061 
2062   entry->Close();
2063   disk_cache::Backend* cache = cache_.get();
2064   SparseTestCompletionCallback cb(cache_.Pass());
2065   int rv = cache->DoomEntry(key, cb.callback());
2066   EXPECT_EQ(net::ERR_IO_PENDING, rv);
2067   EXPECT_EQ(net::OK, cb.WaitForResult());
2068 }
2069 
2070 void DiskCacheEntryTest::PartialSparseEntry() {
2071   std::string key("the first key");
2072   disk_cache::Entry* entry;
2073   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2074 
2075   // We should be able to deal with IO that is not aligned to the block size
2076   // of a sparse entry, at least to write a big range without leaving holes.
2077   const int kSize = 4 * 1024;
2078   const int kSmallSize = 128;
2079   scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
2080   CacheTestFillBuffer(buf1->data(), kSize, false);
2081 
2082   // The first write is just to extend the entry. The third write occupies
2083   // a 1KB block only partially, so it may not be written internally,
2084   // depending on the implementation.
2085   EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
2086   EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
2087   EXPECT_EQ(kSmallSize,
2088             WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
2089   entry->Close();
2090   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2091 
2092   scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize));
2093   memset(buf2->data(), 0, kSize);
2094   EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));
2095 
2096   EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
2097   EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
2098   EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));
2099 
2100   // This read should not change anything.
2101   EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
2102   EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
2103   EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));
2104 
2105   int rv;
2106   int64 start;
2107   net::TestCompletionCallback cb;
2108   if (memory_only_ || simple_cache_mode_) {
2109     rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
2110     EXPECT_EQ(100, cb.GetResult(rv));
2111     EXPECT_EQ(500, start);
2112   } else {
2113     rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
2114     EXPECT_EQ(1024, cb.GetResult(rv));
2115     EXPECT_EQ(1024, start);
2116   }
2117   rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
2118   EXPECT_EQ(500, cb.GetResult(rv));
2119   EXPECT_EQ(kSize, start);
2120   rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
2121   EXPECT_EQ(3616, cb.GetResult(rv));
2122   EXPECT_EQ(20 * 1024, start);
2123 
2124   // 1. Query before a filled 1KB block.
2125   // 2. Query within a filled 1KB block.
2126   // 3. Query beyond a filled 1KB block.
2127   if (memory_only_ || simple_cache_mode_) {
2128     rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
2129     EXPECT_EQ(3496, cb.GetResult(rv));
2130     EXPECT_EQ(20000, start);
2131   } else {
2132     rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
2133     EXPECT_EQ(3016, cb.GetResult(rv));
2134     EXPECT_EQ(20480, start);
2135   }
2136   rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
2137   EXPECT_EQ(1523, cb.GetResult(rv));
2138   EXPECT_EQ(3073, start);
2139   rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
2140   EXPECT_EQ(0, cb.GetResult(rv));
2141   EXPECT_EQ(4600, start);
2142 
2143   // Now make another write and verify that there is no hole in between.
2144   EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
2145   rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
2146   EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
2147   EXPECT_EQ(1024, start);
2148   EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
2149   EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
2150   EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));
2151 
2152   entry->Close();
2153 }
2154 
2155 TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
2156   InitCache();
2157   PartialSparseEntry();
2158 }
2159 
2160 TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
2161   SetMemoryOnlyMode();
2162   InitCache();
2163   PartialSparseEntry();
2164 }
2165 
2166 // Tests that corrupt sparse children are removed automatically.
2167 TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
2168   InitCache();
2169   std::string key("the first key");
2170   disk_cache::Entry* entry;
2171   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2172 
2173   const int kSize = 4 * 1024;
2174   scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
2175   CacheTestFillBuffer(buf1->data(), kSize, false);
2176 
2177   const int k1Meg = 1024 * 1024;
2178   EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
2179   EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
2180   EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
2181   entry->Close();
2182   EXPECT_EQ(4, cache_->GetEntryCount());
2183 
2184   void* iter = NULL;
2185   int count = 0;
2186   std::string child_key[2];
2187   while (OpenNextEntry(&iter, &entry) == net::OK) {
2188     ASSERT_TRUE(entry != NULL);
2189     // Writing to an entry will alter the LRU list and invalidate the iterator.
2190     if (entry->GetKey() != key && count < 2)
2191       child_key[count++] = entry->GetKey();
2192     entry->Close();
2193   }
2194   for (int i = 0; i < 2; i++) {
2195     ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
2196     // Overwrite the header's magic and signature.
2197     EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
2198     entry->Close();
2199   }
2200 
2201   EXPECT_EQ(4, cache_->GetEntryCount());
2202   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2203 
2204   // Two children should be gone. One while reading and one while writing.
2205   EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
2206   EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
2207   EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
2208 
2209   // We never touched this one.
2210   EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
2211   entry->Close();
2212 
2213   // We re-created one of the corrupt children.
2214   EXPECT_EQ(3, cache_->GetEntryCount());
2215 }
2216 
2217 TEST_F(DiskCacheEntryTest, CancelSparseIO) {
2218   UseCurrentThread();
2219   InitCache();
2220   std::string key("the first key");
2221   disk_cache::Entry* entry;
2222   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2223 
2224   const int kSize = 40 * 1024;
2225   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
2226   CacheTestFillBuffer(buf->data(), kSize, false);
2227 
2228   // This will open and write two "real" entries.
2229   net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
2230   int rv = entry->WriteSparseData(
2231       1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
2232   EXPECT_EQ(net::ERR_IO_PENDING, rv);
2233 
2234   int64 offset = 0;
2235   rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
2236   rv = cb5.GetResult(rv);
2237   if (!cb1.have_result()) {
2238     // We may or may not have finished writing to the entry. If we have not,
2239     // we cannot start another operation at this time.
2240     EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
2241   }
2242 
2243   // We cancel the pending operation, and register multiple notifications.
2244   entry->CancelSparseIO();
2245   EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
2246   EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
2247   entry->CancelSparseIO();  // Should be a no op at this point.
2248   EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));
2249 
2250   if (!cb1.have_result()) {
2251     EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
2252               entry->ReadSparseData(
2253                   offset, buf.get(), kSize, net::CompletionCallback()));
2254     EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
2255               entry->WriteSparseData(
2256                   offset, buf.get(), kSize, net::CompletionCallback()));
2257   }
2258 
2259   // Now see if we receive all notifications. Note that we should not be able
2260   // to write everything (unless the timing of the system is really weird).
2261   rv = cb1.WaitForResult();
2262   EXPECT_TRUE(rv == 4096 || rv == kSize);
2263   EXPECT_EQ(net::OK, cb2.WaitForResult());
2264   EXPECT_EQ(net::OK, cb3.WaitForResult());
2265   EXPECT_EQ(net::OK, cb4.WaitForResult());
2266 
2267   rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
2268   EXPECT_EQ(0, cb5.GetResult(rv));
2269   entry->Close();
2270 }
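// As exercised above: CancelSparseIO() aborts the in-flight sparse write (so
// it may complete with only the first 4096 bytes instead of the full buffer),
// a second CancelSparseIO() while cancellation is pending is a no-op, new
// sparse operations fail with ERR_CACHE_OPERATION_NOT_SUPPORTED until the
// cancellation finishes, and every callback registered via ReadyForSparseIO()
// is invoked with net::OK once the entry is usable again.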
2271 
2272 // Tests that we perform sanity checks on an entry's key. Note that there are
2273 // other tests that exercise sanity checks by using saved corrupt files.
2274 TEST_F(DiskCacheEntryTest, KeySanityCheck) {
2275   UseCurrentThread();
2276   InitCache();
2277   std::string key("the first key");
2278   disk_cache::Entry* entry;
2279   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2280 
2281   disk_cache::EntryImpl* entry_impl =
2282       static_cast<disk_cache::EntryImpl*>(entry);
2283   disk_cache::EntryStore* store = entry_impl->entry()->Data();
2284 
2285   // We have reserved space for a short key (one block), let's say that the key
2286   // takes more than one block, and remove the NULLs after the actual key.
2287   store->key_len = 800;
2288   memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
2289   entry_impl->entry()->set_modified();
2290   entry->Close();
2291 
2292   // We have a corrupt entry. Now reload it. We should NOT read beyond the
2293   // allocated buffer here.
2294   ASSERT_NE(net::OK, OpenEntry(key, &entry));
2295   DisableIntegrityCheck();
2296 }
2297 
2298 // The Simple Cache backend requires a few guarantees from the filesystem like
2299 // atomic renaming of recently open files. Those guarantees are not provided in
2300 // general on Windows.
2301 #if defined(OS_POSIX)
2302 
2303 TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
2304   SetSimpleCacheMode();
2305   InitCache();
2306   InternalAsyncIO();
2307 }
2308 
2309 TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
2310   SetSimpleCacheMode();
2311   InitCache();
2312   ExternalAsyncIO();
2313 }
2314 
2315 TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
2316   SetSimpleCacheMode();
2317   InitCache();
2318   ReleaseBuffer();
2319 }
2320 
2321 TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
2322   SetSimpleCacheMode();
2323   InitCache();
2324   StreamAccess();
2325 }
2326 
2327 TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
2328   SetSimpleCacheMode();
2329   InitCache();
2330   GetKey();
2331 }
2332 
2333 TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
2334   SetSimpleCacheMode();
2335   InitCache();
2336   GetTimes();
2337 }
2338 
2339 TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
2340   SetSimpleCacheMode();
2341   InitCache();
2342   GrowData();
2343 }
2344 
2345 TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
2346   SetSimpleCacheMode();
2347   InitCache();
2348   TruncateData();
2349 }
2350 
2351 TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
2352   SetSimpleCacheMode();
2353   InitCache();
2354   ZeroLengthIO();
2355 }
2356 
2357 TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
2358   SetSimpleCacheMode();
2359   InitCache();
2360   SizeAtCreate();
2361 }
2362 
2363 TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
2364   SetSimpleCacheMode();
2365   SetMaxSize(200 * 1024);
2366   InitCache();
2367   ReuseEntry(20 * 1024);
2368 }
2369 
2370 TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
2371   SetSimpleCacheMode();
2372   SetMaxSize(100 * 1024);
2373   InitCache();
2374   ReuseEntry(10 * 1024);
2375 }
2376 
2377 TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
2378   SetSimpleCacheMode();
2379   InitCache();
2380   SizeChanges();
2381 }
2382 
2383 TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
2384   SetSimpleCacheMode();
2385   InitCache();
2386   InvalidData();
2387 }
2388 
2389 TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
2390   SetSimpleCacheMode();
2391   InitCache();
2392   ReadWriteDestroyBuffer();
2393 }
2394 
2395 TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
2396   SetSimpleCacheMode();
2397   InitCache();
2398   DoomNormalEntry();
2399 }
2400 
2401 TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
2402   SetSimpleCacheMode();
2403   InitCache();
2404   DoomEntryNextToOpenEntry();
2405 }
2406 
2407 TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
2408   SetSimpleCacheMode();
2409   InitCache();
2410   DoomedEntry();
2411 }
2412 
2413 // Creates an entry with corrupted last byte in stream 0.
2414 // Requires SimpleCacheMode.
2415 bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string& key,
2416                                                          int* data_size) {
2417   disk_cache::Entry* entry = NULL;
2418 
2419   if (CreateEntry(key, &entry) != net::OK || !entry) {
2420     LOG(ERROR) << "Could not create entry";
2421     return false;
2422   }
2423 
2424   const char data[] = "this is very good data";
2425   const int kDataSize = arraysize(data);
2426   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
2427   base::strlcpy(buffer->data(), data, kDataSize);
2428 
2429   EXPECT_EQ(kDataSize, WriteData(entry, 1, 0, buffer.get(), kDataSize, false));
2430   entry->Close();
2431   entry = NULL;
2432 
2433   // Corrupt the last byte of the data.
2434   base::FilePath entry_file0_path = cache_path_.AppendASCII(
2435       disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
2436   int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
2437   base::PlatformFile entry_file0 =
2438       base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL);
2439   if (entry_file0 == base::kInvalidPlatformFileValue)
2440     return false;
2441 
2442   int64 file_offset =
2443       sizeof(disk_cache::SimpleFileHeader) + key.size() + kDataSize - 2;
2444   EXPECT_EQ(1, base::WritePlatformFile(entry_file0, file_offset, "X", 1));
2445   if (!base::ClosePlatformFile(entry_file0))
2446     return false;
2447   *data_size = kDataSize;
2448   return true;
2449 }
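// The corruption offset above assumes the file layout implied by this helper:
// a SimpleFileHeader, followed by the key, followed by the stream data.
// |file_offset| therefore points at the last meaningful byte of the written
// data (kDataSize includes the trailing NUL, hence the -2), so flipping that
// byte to 'X' invalidates the stream's checksum without touching the header
// or the key.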
2450 
2451 // Tests that the simple cache can detect entries that have bad data.
2452 TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
2453   SetSimpleCacheMode();
2454   InitCache();
2455 
2456   const char key[] = "the first key";
2457   int size_unused;
2458   ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));
2459 
2460   disk_cache::Entry* entry = NULL;
2461 
2462   // Open the entry.
2463   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2464   ScopedEntryPtr entry_closer(entry);
2465 
2466   const int kReadBufferSize = 200;
2467   EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
2468   scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
2469   EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
2470             ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
2471 }
2472 
2473 // Tests that an entry that has had an IO error occur can still be Doomed().
2474 TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
2475   SetSimpleCacheMode();
2476   InitCache();
2477 
2478   const char key[] = "the first key";
2479   int size_unused;
2480   ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));
2481 
2482   disk_cache::Entry* entry = NULL;
2483 
2484   // Open the entry, forcing an IO error.
2485   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2486   ScopedEntryPtr entry_closer(entry);
2487 
2488   const int kReadBufferSize = 200;
2489   EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
2490   scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
2491   EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
2492             ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
2493 
2494   entry->Doom();  // Should not crash.
2495 }
2496 
2497 bool TruncatePath(const base::FilePath& file_path, int64 length)  {
2498   const int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
2499   base::PlatformFile file =
2500       base::CreatePlatformFile(file_path, flags, NULL, NULL);
2501   if (base::kInvalidPlatformFileValue == file)
2502     return false;
2503   const bool result = base::TruncatePlatformFile(file, length);
2504   base::ClosePlatformFile(file);
2505   return result;
2506 }
2507 
2508 TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
2509   SetSimpleCacheMode();
2510   InitCache();
2511 
2512   const char key[] = "the first key";
2513 
2514   disk_cache::Entry* entry = NULL;
2515   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2516   disk_cache::Entry* null = NULL;
2517   EXPECT_NE(null, entry);
2518   entry->Close();
2519   entry = NULL;
2520 
2521   // Force the entry to flush to disk, so subsequent platform file operations
2522   // succeed.
2523   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2524   entry->Close();
2525   entry = NULL;
2526 
2527   // Truncate the file such that the length isn't sufficient to have an EOF
2528   // record.
2529   int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
2530   const base::FilePath entry_path = cache_path_.AppendASCII(
2531       disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
2532   const int64 invalid_size =
2533       disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
2534                                                              kTruncationBytes);
2535   EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
2536   EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
2537   DisableIntegrityCheck();
2538 }
2539 
2540 TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
2541   // Test sequence:
2542   // Create, Write, Read, Close.
2543   SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
2544   SetSimpleCacheMode();
2545   InitCache();
2546   disk_cache::Entry* const null_entry = NULL;
2547 
2548   disk_cache::Entry* entry = NULL;
2549   EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2550   ASSERT_NE(null_entry, entry);
2551   ScopedEntryPtr entry_closer(entry);
2552 
2553   const int kBufferSize = 10;
2554   scoped_refptr<net::IOBufferWithSize> write_buffer(
2555       new net::IOBufferWithSize(kBufferSize));
2556   CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2557   EXPECT_EQ(
2558       write_buffer->size(),
2559       WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));
2560 
2561   scoped_refptr<net::IOBufferWithSize> read_buffer(
2562       new net::IOBufferWithSize(kBufferSize));
2563   EXPECT_EQ(read_buffer->size(),
2564             ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
2565 }
2566 
2567 TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
2568   // Test sequence:
2569   // Create, Write, Close.
2570   SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
2571   SetSimpleCacheMode();
2572   InitCache();
2573   disk_cache::Entry* const null_entry = NULL;
2574 
2575   MessageLoopHelper helper;
2576   CallbackTest create_callback(&helper, false);
2577 
2578   int expected_callback_runs = 0;
2579   const int kBufferSize = 10;
2580   scoped_refptr<net::IOBufferWithSize> write_buffer(
2581       new net::IOBufferWithSize(kBufferSize));
2582 
2583   disk_cache::Entry* entry = NULL;
2584   EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2585   ASSERT_NE(null_entry, entry);
2586   ScopedEntryPtr entry_closer(entry);
2587 
2588   CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2589   CallbackTest write_callback(&helper, false);
2590   int ret = entry->WriteData(
2591       1,
2592       0,
2593       write_buffer.get(),
2594       write_buffer->size(),
2595       base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
2596       false);
2597   ASSERT_EQ(net::ERR_IO_PENDING, ret);
2598   helper.WaitUntilCacheIoFinished(++expected_callback_runs);
2599 }
2600 
2601 TEST_F(DiskCacheEntryTest,
2602        SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
2603   // Test sequence:
2604   // Create, Write, Read, Close.
2605   SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
2606   SetSimpleCacheMode();
2607   InitCache();
2608   disk_cache::Entry* const null_entry = NULL;
2609   MessageLoopHelper helper;
2610 
2611   disk_cache::Entry* entry = NULL;
2612   // Note that |entry| is only set once CreateEntry() has completed, which is
2613   // why we have to wait (i.e. use the helper CreateEntry() function).
2614   EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2615   ASSERT_NE(null_entry, entry);
2616   ScopedEntryPtr entry_closer(entry);
2617 
2618   const int kBufferSize = 10;
2619   scoped_refptr<net::IOBufferWithSize> write_buffer(
2620       new net::IOBufferWithSize(kBufferSize));
2621   CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2622   CallbackTest write_callback(&helper, false);
2623   int ret = entry->WriteData(
2624       1,
2625       0,
2626       write_buffer.get(),
2627       write_buffer->size(),
2628       base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
2629       false);
2630   EXPECT_EQ(net::ERR_IO_PENDING, ret);
2631   int expected_callback_runs = 1;
2632 
2633   scoped_refptr<net::IOBufferWithSize> read_buffer(
2634       new net::IOBufferWithSize(kBufferSize));
2635   CallbackTest read_callback(&helper, false);
2636   ret = entry->ReadData(
2637       1,
2638       0,
2639       read_buffer.get(),
2640       read_buffer->size(),
2641       base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
2642   EXPECT_EQ(net::ERR_IO_PENDING, ret);
2643   ++expected_callback_runs;
2644 
2645   helper.WaitUntilCacheIoFinished(expected_callback_runs);
2646   ASSERT_EQ(read_buffer->size(), write_buffer->size());
2647   EXPECT_EQ(
2648       0,
2649       memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
2650 }
2651 
2652 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
2653   // Test sequence:
2654   // Create, Write, Read, Write, Read, Close.
2655   SetSimpleCacheMode();
2656   InitCache();
2657   disk_cache::Entry* null = NULL;
2658   const char key[] = "the first key";
2659 
2660   MessageLoopHelper helper;
2661   CallbackTest callback1(&helper, false);
2662   CallbackTest callback2(&helper, false);
2663   CallbackTest callback3(&helper, false);
2664   CallbackTest callback4(&helper, false);
2665   CallbackTest callback5(&helper, false);
2666 
2667   int expected = 0;
2668   const int kSize1 = 10;
2669   const int kSize2 = 20;
2670   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2671   scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
2672   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
2673   scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
2674   CacheTestFillBuffer(buffer1->data(), kSize1, false);
2675   CacheTestFillBuffer(buffer2->data(), kSize2, false);
2676 
2677   disk_cache::Entry* entry = NULL;
2678   // Create is optimistic, must return OK.
2679   ASSERT_EQ(net::OK,
2680             cache_->CreateEntry(key, &entry,
2681                                 base::Bind(&CallbackTest::Run,
2682                                            base::Unretained(&callback1))));
2683   EXPECT_NE(null, entry);
2684   ScopedEntryPtr entry_closer(entry);
2685 
2686   // This write may or may not be optimistic (it depends if the previous
2687   // optimistic create already finished by the time we call the write here).
2688   int ret = entry->WriteData(
2689       1,
2690       0,
2691       buffer1.get(),
2692       kSize1,
2693       base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
2694       false);
2695   EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
2696   if (net::ERR_IO_PENDING == ret)
2697     expected++;
2698 
2699   // This Read must not be optimistic, since we don't support that yet.
2700   EXPECT_EQ(net::ERR_IO_PENDING,
2701             entry->ReadData(
2702                 1,
2703                 0,
2704                 buffer1_read.get(),
2705                 kSize1,
2706                 base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
2707   expected++;
2708   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
2709   EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
2710 
2711   // At this point after waiting, the pending operations queue on the entry
2712   // should be empty, so the next Write operation must run as optimistic.
2713   EXPECT_EQ(kSize2,
2714             entry->WriteData(
2715                 1,
2716                 0,
2717                 buffer2.get(),
2718                 kSize2,
2719                 base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
2720                 false));
2721 
2722   // Let's do another read so we block until both the write and the read
2723   // operations finish and we can then test for HasOneRef() below.
2724   EXPECT_EQ(net::ERR_IO_PENDING,
2725             entry->ReadData(
2726                 1,
2727                 0,
2728                 buffer2_read.get(),
2729                 kSize2,
2730                 base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
2731   expected++;
2732 
2733   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
2734   EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));
2735 
2736   // Check that we are not leaking.
2737   EXPECT_NE(entry, null);
2738   EXPECT_TRUE(
2739       static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2740 }
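// Because an optimistic write can complete synchronously (returning the byte
// count) or go through the queue (returning ERR_IO_PENDING), callers that do
// not care which path was taken can normalize the result. A small
// illustrative sketch of that pattern, using the TestCompletionCallback
// helper already used throughout this file; the function name and the choice
// of stream index/offset are hypothetical:
int WriteMaybeOptimistic(disk_cache::Entry* entry, net::IOBuffer* buf,
                         int len) {
  net::TestCompletionCallback cb;
  int rv = entry->WriteData(1, 0, buf, len, cb.callback(), false);
  // GetResult() returns |rv| unchanged when the write finished synchronously
  // and waits for the callback when ERR_IO_PENDING was returned.
  return cb.GetResult(rv);
}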
2741 
2742 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
2743   // Test sequence:
2744   // Create, Open, Close, Close.
2745   SetSimpleCacheMode();
2746   InitCache();
2747   disk_cache::Entry* null = NULL;
2748   const char key[] = "the first key";
2749 
2750   MessageLoopHelper helper;
2751   CallbackTest callback1(&helper, false);
2752   CallbackTest callback2(&helper, false);
2753 
2754   disk_cache::Entry* entry = NULL;
2755   ASSERT_EQ(net::OK,
2756             cache_->CreateEntry(key, &entry,
2757                                 base::Bind(&CallbackTest::Run,
2758                                            base::Unretained(&callback1))));
2759   EXPECT_NE(null, entry);
2760   ScopedEntryPtr entry_closer(entry);
2761 
2762   disk_cache::Entry* entry2 = NULL;
2763   ASSERT_EQ(net::ERR_IO_PENDING,
2764             cache_->OpenEntry(key, &entry2,
2765                               base::Bind(&CallbackTest::Run,
2766                                          base::Unretained(&callback2))));
2767   ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));
2768 
2769   EXPECT_NE(null, entry2);
2770   EXPECT_EQ(entry, entry2);
2771 
2772   // We have to call close twice, since we called create and open above.
2773   entry->Close();
2774 
2775   // Check that we are not leaking.
2776   EXPECT_TRUE(
2777       static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2778 }
2779 
2780 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
2781   // Test sequence:
2782   // Create, Close, Open, Close.
2783   SetSimpleCacheMode();
2784   InitCache();
2785   disk_cache::Entry* null = NULL;
2786   const char key[] = "the first key";
2787 
2788   disk_cache::Entry* entry = NULL;
2789   ASSERT_EQ(net::OK,
2790             cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2791   EXPECT_NE(null, entry);
2792   entry->Close();
2793 
2794   net::TestCompletionCallback cb;
2795   disk_cache::Entry* entry2 = NULL;
2796   ASSERT_EQ(net::ERR_IO_PENDING,
2797             cache_->OpenEntry(key, &entry2, cb.callback()));
2798   ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
2799   ScopedEntryPtr entry_closer(entry2);
2800 
2801   EXPECT_NE(null, entry2);
2802   EXPECT_EQ(entry, entry2);
2803 
2804   // Check that we are not leaking.
2805   EXPECT_TRUE(
2806       static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
2807 }
2808 
2809 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
2810   // Test sequence:
2811   // Create, Close, Write, Open, Open, Close, Write, Read, Close.
2812   SetSimpleCacheMode();
2813   InitCache();
2814   disk_cache::Entry* null = NULL;
2815   const char key[] = "the first key";
2816 
2817   net::TestCompletionCallback cb;
2818   const int kSize1 = 10;
2819   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2820   CacheTestFillBuffer(buffer1->data(), kSize1, false);
2821   disk_cache::Entry* entry = NULL;
2822 
2823   ASSERT_EQ(net::OK,
2824             cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2825   EXPECT_NE(null, entry);
2826   entry->Close();
2827 
2828   // Let's do a Write so we block until both the Close and the Write
2829   // operations finish. The Write must fail since we are writing to a closed entry.
2830   EXPECT_EQ(
2831       net::ERR_IO_PENDING,
2832       entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
2833   EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));
2834 
2835   // Finish running the pending tasks so that we fully complete the close
2836   // operation and destroy the entry object.
2837   base::MessageLoop::current()->RunUntilIdle();
2838 
2839   // At this point the |entry| must have been destroyed, and called
2840   // RemoveSelfFromBackend().
2841   disk_cache::Entry* entry2 = NULL;
2842   ASSERT_EQ(net::ERR_IO_PENDING,
2843             cache_->OpenEntry(key, &entry2, cb.callback()));
2844   ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
2845   EXPECT_NE(null, entry2);
2846 
2847   disk_cache::Entry* entry3 = NULL;
2848   ASSERT_EQ(net::ERR_IO_PENDING,
2849             cache_->OpenEntry(key, &entry3, cb.callback()));
2850   ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
2851   EXPECT_NE(null, entry3);
2852   EXPECT_EQ(entry2, entry3);
2853   entry3->Close();
2854 
2855   // The previous Close doesn't actually close the entry since we opened it
2856   // twice, so the next Write operation must succeed and it must be able to
2857   // perform it optimistically, since there is no operation running on this
2858   // entry.
2859   EXPECT_EQ(kSize1,
2860             entry2->WriteData(
2861                 1, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
2862 
2863   // Let's do another read so we block until both the write and the read
2864   // operations finish and we can then test for HasOneRef() below.
2865   EXPECT_EQ(net::ERR_IO_PENDING,
2866             entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
2867   EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2868 
2869   // Check that we are not leaking.
2870   EXPECT_TRUE(
2871       static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
2872   entry2->Close();
2873 }
2874 
2875 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic5) {
2876   // Test sequence:
2877   // Create, Doom, Write, Read, Close.
2878   SetSimpleCacheMode();
2879   InitCache();
2880   disk_cache::Entry* null = NULL;
2881   const char key[] = "the first key";
2882 
2883   net::TestCompletionCallback cb;
2884   const int kSize1 = 10;
2885   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2886   CacheTestFillBuffer(buffer1->data(), kSize1, false);
2887   disk_cache::Entry* entry = NULL;
2888 
2889   ASSERT_EQ(net::OK,
2890             cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2891   EXPECT_NE(null, entry);
2892   ScopedEntryPtr entry_closer(entry);
2893   entry->Doom();
2894 
2895   EXPECT_EQ(
2896       net::ERR_IO_PENDING,
2897       entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
2898   EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2899 
2900   EXPECT_EQ(net::ERR_IO_PENDING,
2901             entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
2902   EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2903 
2904   // Check that we are not leaking.
2905   EXPECT_TRUE(
2906       static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2907 }
2908 
2909 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
2910   // Test sequence:
2911   // Create, Write, Doom, Doom, Read, Doom, Close.
2912   SetSimpleCacheMode();
2913   InitCache();
2914   disk_cache::Entry* null = NULL;
2915   const char key[] = "the first key";
2916 
2917   net::TestCompletionCallback cb;
2918   const int kSize1 = 10;
2919   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2920   scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
2921   CacheTestFillBuffer(buffer1->data(), kSize1, false);
2922   disk_cache::Entry* entry = NULL;
2923 
2924   ASSERT_EQ(net::OK,
2925             cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2926   EXPECT_NE(null, entry);
2927   ScopedEntryPtr entry_closer(entry);
2928 
2929   EXPECT_EQ(
2930       net::ERR_IO_PENDING,
2931       entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
2932   EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2933 
2934   entry->Doom();
2935   entry->Doom();
2936 
2937   // This Read must not be optimistic, since we don't support that yet.
2938   EXPECT_EQ(net::ERR_IO_PENDING,
2939             entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
2940   EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2941   EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
2942 
2943   entry->Doom();
2944 }
2945 
2946 // Confirm that IO buffers are not referenced by the Simple Cache after a write
2947 // completes.
2948 TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
2949   SetSimpleCacheMode();
2950   InitCache();
2951 
2952   const char key[] = "the first key";
2953   disk_cache::Entry* entry = NULL;
2954 
2955   // First, an optimistic create.
2956   ASSERT_EQ(net::OK,
2957             cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2958   ASSERT_TRUE(entry);
2959   ScopedEntryPtr entry_closer(entry);
2960 
2961   const int kWriteSize = 512;
2962   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
2963   EXPECT_TRUE(buffer1->HasOneRef());
2964   CacheTestFillBuffer(buffer1->data(), kWriteSize, false);
2965 
2966   // An optimistic write happens only when there is an empty queue of pending
2967   // operations. To ensure the queue is empty, we issue a write and wait until
2968   // it completes.
2969   EXPECT_EQ(kWriteSize,
2970             WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
2971   EXPECT_TRUE(buffer1->HasOneRef());
2972 
2973   // Finally, we should perform an optimistic write and confirm that all
2974   // references to the IO buffer have been released.
2975   EXPECT_EQ(
2976       kWriteSize,
2977       entry->WriteData(
2978           1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
2979   EXPECT_TRUE(buffer1->HasOneRef());
2980 }
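
// An illustrative sketch, not part of the original suite: the way these tests
// tell an optimistic write apart from a queued one is the return value. With
// an empty pending-operation queue, Entry::WriteData() returns the byte count
// directly; otherwise it returns net::ERR_IO_PENDING. The test name and key
// below are invented for the example; everything else uses fixture helpers
// that already appear in this file.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReturnValueSketch) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "sketch key";
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);

  const int kSize = 16;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  // A synchronous helper write drains the queue left behind by the optimistic
  // Create above...
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, false));

  // ...so this direct write can be serviced optimistically and must return
  // the byte count instead of net::ERR_IO_PENDING.
  EXPECT_EQ(kSize,
            entry->WriteData(
                1, 0, buffer.get(), kSize, net::CompletionCallback(), false));
}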
2981 
2982 TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
2983   // Test sequence:
2984   // Create, Doom, Write, Close, Check files are not on disk anymore.
2985   SetSimpleCacheMode();
2986   InitCache();
2987   disk_cache::Entry* null = NULL;
2988   const char key[] = "the first key";
2989 
2990   net::TestCompletionCallback cb;
2991   const int kSize1 = 10;
2992   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2993   CacheTestFillBuffer(buffer1->data(), kSize1, false);
2994   disk_cache::Entry* entry = NULL;
2995 
2996   ASSERT_EQ(net::OK,
2997             cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2998   EXPECT_NE(null, entry);
2999 
3000   EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomEntry(key, cb.callback()));
3001   EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
3002 
3003   EXPECT_EQ(
3004       kSize1,
3005       entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
3006 
3007   entry->Close();
3008 
3009   // Finish running the pending tasks so that we fully complete the close
3010   // operation and destroy the entry object.
3011   base::MessageLoop::current()->RunUntilIdle();
3012 
3013   for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
3014     base::FilePath entry_file_path = cache_path_.AppendASCII(
3015         disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
3016     base::PlatformFileInfo info;
3017     EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
3018   }
3019 }
3020 
3021 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
3022   // This test runs as APP_CACHE to make operations more synchronous. Test
3023   // sequence:
3024   // Create, Doom, Create.
3025   SetCacheType(net::APP_CACHE);
3026   SetSimpleCacheMode();
3027   InitCache();
3028   disk_cache::Entry* null = NULL;
3029   const char key[] = "the first key";
3030 
3031   net::TestCompletionCallback create_callback;
3032 
3033   disk_cache::Entry* entry1 = NULL;
3034   ASSERT_EQ(net::OK,
3035             create_callback.GetResult(
3036                 cache_->CreateEntry(key, &entry1, create_callback.callback())));
3037   ScopedEntryPtr entry1_closer(entry1);
3038   EXPECT_NE(null, entry1);
3039 
3040   net::TestCompletionCallback doom_callback;
3041   EXPECT_EQ(net::ERR_IO_PENDING,
3042             cache_->DoomEntry(key, doom_callback.callback()));
3043 
3044   disk_cache::Entry* entry2 = NULL;
3045   ASSERT_EQ(net::OK,
3046             create_callback.GetResult(
3047                 cache_->CreateEntry(key, &entry2, create_callback.callback())));
3048   ScopedEntryPtr entry2_closer(entry2);
3049   EXPECT_EQ(net::OK, doom_callback.GetResult(net::ERR_IO_PENDING));
3050 }
3051 
3052 TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
3053   // Test sequence:
3054   // Create, Doom, Create, Doom (1st entry), Open.
3055   SetSimpleCacheMode();
3056   InitCache();
3057   disk_cache::Entry* null = NULL;
3058 
3059   const char key[] = "the first key";
3060 
3061   disk_cache::Entry* entry1 = NULL;
3062   ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3063   ScopedEntryPtr entry1_closer(entry1);
3064   EXPECT_NE(null, entry1);
3065 
3066   EXPECT_EQ(net::OK, DoomEntry(key));
3067 
3068   disk_cache::Entry* entry2 = NULL;
3069   ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3070   ScopedEntryPtr entry2_closer(entry2);
3071   EXPECT_NE(null, entry2);
3072 
3073   // Redundantly dooming entry1 should not delete entry2.
3074   disk_cache::SimpleEntryImpl* simple_entry1 =
3075       static_cast<disk_cache::SimpleEntryImpl*>(entry1);
3076   net::TestCompletionCallback cb;
3077   EXPECT_EQ(net::OK,
3078             cb.GetResult(simple_entry1->DoomEntry(cb.callback())));
3079 
3080   disk_cache::Entry* entry3 = NULL;
3081   ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
3082   ScopedEntryPtr entry3_closer(entry3);
3083   EXPECT_NE(null, entry3);
3084 }
3085 
3086 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateDoom) {
3087   // Test sequence:
3088   // Create, Doom, Create, Doom.
3089   SetSimpleCacheMode();
3090   InitCache();
3091 
3092   disk_cache::Entry* null = NULL;
3093 
3094   const char key[] = "the first key";
3095 
3096   disk_cache::Entry* entry1 = NULL;
3097   ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3098   ScopedEntryPtr entry1_closer(entry1);
3099   EXPECT_NE(null, entry1);
3100 
3101   entry1->Doom();
3102 
3103   disk_cache::Entry* entry2 = NULL;
3104   ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3105   ScopedEntryPtr entry2_closer(entry2);
3106   EXPECT_NE(null, entry2);
3107 
3108   entry2->Doom();
3109 
3110   // This test passes if it doesn't crash.
3111 }
3112 
3113 // Checks that an optimistic Create would fail later on a racing Open.
3114 TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
3115   SetSimpleCacheMode();
3116   InitCache();
3117 
3118   // Create a corrupt file in place of a future entry. Optimistic create should
3119   // initially succeed, but realize later that creation failed.
3120   const std::string key = "the key";
3121   net::TestCompletionCallback cb;
3122   disk_cache::Entry* entry = NULL;
3123   disk_cache::Entry* entry2 = NULL;
3124 
3125   EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3126       key, cache_path_));
3127   EXPECT_EQ(net::OK, cache_->CreateEntry(key, &entry, cb.callback()));
3128   ASSERT_TRUE(entry);
3129   ScopedEntryPtr entry_closer(entry);
3130   ASSERT_NE(net::OK, OpenEntry(key, &entry2));
3131 
3132   // Check that we are not leaking.
3133   EXPECT_TRUE(
3134       static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3135 
3136   DisableIntegrityCheck();
3137 }
3138 
3139 // Tests that old entries are evicted while new entries remain in the index.
3140 // This test relies on non-mandatory properties of the Simple Cache backend:
3141 // LRU eviction, specific values of the high and low watermarks, etc.
3142 // When changing the eviction algorithm, the test will have to be re-engineered.
3143 TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
3144   const int kMaxSize = 200 * 1024;
3145   const int kWriteSize = kMaxSize / 10;
3146   const int kNumExtraEntries = 12;
3147   SetSimpleCacheMode();
3148   SetMaxSize(kMaxSize);
3149   InitCache();
3150 
3151   std::string key1("the first key");
3152   disk_cache::Entry* entry;
3153   ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
3154   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
3155   CacheTestFillBuffer(buffer->data(), kWriteSize, false);
3156   EXPECT_EQ(kWriteSize,
3157             WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
3158   entry->Close();
3159   AddDelay();
3160 
3161   std::string key2("the key prefix");
3162   for (int i = 0; i < kNumExtraEntries; i++) {
3163     ASSERT_EQ(net::OK, CreateEntry(key2 + base::StringPrintf("%d", i), &entry));
3164     ScopedEntryPtr entry_closer(entry);
3165     EXPECT_EQ(kWriteSize,
3166               WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
3167   }
3168 
3169   // TODO(pasko): Find a way to wait for the eviction task(s) to finish by
3170   // using internal knowledge of |SimpleBackendImpl|.
3171   ASSERT_NE(net::OK, OpenEntry(key1, &entry))
3172       << "Should have evicted the old entry";
3173   for (int i = 0; i < 2; i++) {
3174     int entry_no = kNumExtraEntries - i - 1;
3175     // In general there is no guarantee that the background eviction has
3176     // finished at this point. We are testing the positive case, i.e. eviction
3177     // never reaching this fresh entry, which should be non-flaky.
3178     ASSERT_EQ(net::OK, OpenEntry(key2 + base::StringPrintf("%d", entry_no),
3179                                  &entry))
3180         << "Should not have evicted fresh entry " << entry_no;
3181     entry->Close();
3182   }
3183 }
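
// The arithmetic behind the test above, restated as a sketch (the constants
// mirror the ones used there; nothing beyond them is assumed): each extra
// entry writes kMaxSize / 10 bytes, so the initial entry plus the 12 extra
// ones add up to 13 * 20 KiB = 260 KiB, which is well over the 200 KiB limit
// and therefore forces the backend to evict, and LRU makes the oldest entry
// the victim.
namespace {
const int kEvictSketchMaxSize = 200 * 1024;
const int kEvictSketchWriteSize = kEvictSketchMaxSize / 10;
const int kEvictSketchTotalWritten = (1 + 12) * kEvictSketchWriteSize;
COMPILE_ASSERT(kEvictSketchTotalWritten > kEvictSketchMaxSize,
               eviction_must_trigger_in_SimpleCacheEvictOldEntries);
}  // namespace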
3184 
3185 // Tests that if a read and a subsequent in-flight truncate are in progress
3186 // simultaneously, they can both complete successfully. See
3187 // http://crbug.com/239223
3188 TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
3189   SetSimpleCacheMode();
3190   InitCache();
3191 
3192   const char key[] = "the first key";
3193 
3194   const int kBufferSize = 1024;
3195   scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
3196   CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
3197 
3198   disk_cache::Entry* entry = NULL;
3199   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3200 
3201   EXPECT_EQ(kBufferSize,
3202             WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
3203   entry->Close();
3204   entry = NULL;
3205 
3206   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3207   ScopedEntryPtr entry_closer(entry);
3208 
3209   MessageLoopHelper helper;
3210   int expected = 0;
3211 
3212   // Make a short read.
3213   const int kReadBufferSize = 512;
3214   scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
3215   CallbackTest read_callback(&helper, false);
3216   EXPECT_EQ(net::ERR_IO_PENDING,
3217             entry->ReadData(1,
3218                             0,
3219                             read_buffer.get(),
3220                             kReadBufferSize,
3221                             base::Bind(&CallbackTest::Run,
3222                                        base::Unretained(&read_callback))));
3223   ++expected;
3224 
3225   // Truncate the entry to the length of that read.
3226   scoped_refptr<net::IOBuffer>
3227       truncate_buffer(new net::IOBuffer(kReadBufferSize));
3228   CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
3229   CallbackTest truncate_callback(&helper, false);
3230   EXPECT_EQ(net::ERR_IO_PENDING,
3231             entry->WriteData(1,
3232                              0,
3233                              truncate_buffer.get(),
3234                              kReadBufferSize,
3235                              base::Bind(&CallbackTest::Run,
3236                                         base::Unretained(&truncate_callback)),
3237                              true));
3238   ++expected;
3239 
3240   // Wait for both the read and truncation to finish, and confirm that both
3241   // succeeded.
3242   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
3243   EXPECT_EQ(kReadBufferSize, read_callback.last_result());
3244   EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
3245   EXPECT_EQ(0,
3246             memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
3247 }
3248 
3249 // Tests that if a write and a read dependent on it are both in flight
3250 // simultaneously, they can both complete successfully without erroneous
3251 // early returns. See http://crbug.com/239223
3252 TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
3253   SetSimpleCacheMode();
3254   InitCache();
3255 
3256   const char key[] = "the first key";
3257   disk_cache::Entry* entry = NULL;
3258   ASSERT_EQ(net::OK,
3259             cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3260   ScopedEntryPtr entry_closer(entry);
3261 
3262   const int kBufferSize = 1024;
3263   scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
3264   CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
3265 
3266   MessageLoopHelper helper;
3267   int expected = 0;
3268 
3269   CallbackTest write_callback(&helper, false);
3270   EXPECT_EQ(net::ERR_IO_PENDING,
3271             entry->WriteData(1,
3272                              0,
3273                              write_buffer.get(),
3274                              kBufferSize,
3275                              base::Bind(&CallbackTest::Run,
3276                                         base::Unretained(&write_callback)),
3277                              true));
3278   ++expected;
3279 
3280   scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
3281   CallbackTest read_callback(&helper, false);
3282   EXPECT_EQ(net::ERR_IO_PENDING,
3283             entry->ReadData(1,
3284                             0,
3285                             read_buffer.get(),
3286                             kBufferSize,
3287                             base::Bind(&CallbackTest::Run,
3288                                        base::Unretained(&read_callback))));
3289   ++expected;
3290 
3291   EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
3292   EXPECT_EQ(kBufferSize, write_callback.last_result());
3293   EXPECT_EQ(kBufferSize, read_callback.last_result());
3294   EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
3295 }
3296 
3297 TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
3298   SetSimpleCacheMode();
3299   DisableSimpleCacheWaitForIndex();
3300   DisableIntegrityCheck();
3301   InitCache();
3302 
3303   // Assume the index is not initialized, which is likely, since we are blocking
3304   // the IO thread from executing the index finalization step.
3305   disk_cache::Entry* entry1;
3306   net::TestCompletionCallback cb1;
3307   disk_cache::Entry* entry2;
3308   net::TestCompletionCallback cb2;
3309   int rv1 = cache_->OpenEntry("key", &entry1, cb1.callback());
3310   int rv2 = cache_->CreateEntry("key", &entry2, cb2.callback());
3311 
3312   EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
3313   ASSERT_EQ(net::OK, cb2.GetResult(rv2));
3314   entry2->Close();
3315 }
3316 
3317 // Checks that reading two entries simultaneously does not discard a CRC check.
3318 // TODO(pasko): make it work with Simple Cache.
3319 TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheMultipleReadersCheckCRC) {
3320   SetSimpleCacheMode();
3321   InitCache();
3322 
3323   const char key[] = "key";
3324 
3325   int size;
3326   ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
3327 
3328   scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
3329   scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
3330 
3331   // Advance the first reader a little.
3332   disk_cache::Entry* entry = NULL;
3333   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3334   EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));
3335 
3336   // Make the second reader pass the point where the first one is, and close.
3337   disk_cache::Entry* entry2 = NULL;
3338   EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
3339   EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
3340   EXPECT_EQ(1, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
3341   entry2->Close();
3342 
3343   // Reading the data to the end should produce an error.
3344   EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
3345   entry->Close();
3346   DisableIntegrityCheck();
3347 }
3348 
3349 // Checks one more scenario of overlapped reading of a bad entry. It differs
3350 // from |SimpleCacheMultipleReadersCheckCRC| only in the order of the last
3351 // two reads.
3352 TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
3353   SetSimpleCacheMode();
3354   InitCache();
3355 
3356   const char key[] = "key";
3357   int size;
3358   ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
3359 
3360   scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
3361   scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
3362 
3363   // Advance the first reader a little.
3364   disk_cache::Entry* entry = NULL;
3365   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3366   ScopedEntryPtr entry_closer(entry);
3367   EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));
3368 
3369   // Advance the 2nd reader by the same amount.
3370   disk_cache::Entry* entry2 = NULL;
3371   EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
3372   ScopedEntryPtr entry2_closer(entry2);
3373   EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));
3374 
3375   // Continue reading 1st.
3376   EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));
3377 
3378   // This read should fail as well because we have previous read failures.
3379   EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
3380   DisableIntegrityCheck();
3381 }
3382 
3383 // Tests that if we sequentially read each subset of the data until all of it
3384 // has been read, the CRC is calculated correctly and the reads are successful.
3385 TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
3386   // Test sequence:
3387   // Create, Write, Read (first half of data), Read (second half of data),
3388   // Close.
3389   SetSimpleCacheMode();
3390   InitCache();
3391   disk_cache::Entry* null = NULL;
3392   const char key[] = "the first key";
3393 
3394   const int kHalfSize = 200;
3395   const int kSize = 2 * kHalfSize;
3396   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3397   CacheTestFillBuffer(buffer1->data(), kSize, false);
3398   disk_cache::Entry* entry = NULL;
3399 
3400   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3401   EXPECT_NE(null, entry);
3402 
3403   EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
3404   entry->Close();
3405 
3406   disk_cache::Entry* entry2 = NULL;
3407   ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
3408   EXPECT_EQ(entry, entry2);
3409 
3410   // Read the first half of the data.
3411   int offset = 0;
3412   int buf_len = kHalfSize;
3413   scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
3414   EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
3415   EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));
3416 
3417   // Read the second half of the data.
3418   offset = buf_len;
3419   buf_len = kHalfSize;
3420   scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
3421   EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
3422   char* buffer1_data = buffer1->data() + offset;
3423   EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));
3424 
3425   // Check that we are not leaking.
3426   EXPECT_NE(entry, null);
3427   EXPECT_TRUE(
3428       static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3429   entry->Close();
3430   entry = NULL;
3431 }
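
// A follow-up sketch (invented test name; same fixture helpers as above)
// showing that the CRC combination is not specific to halves: reading the
// stream back in four sequential chunks must also succeed, since each read
// extends the running CRC until the final one checks it.
TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRCQuartersSketch) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  const int kQuarterSize = 100;
  const int kSize = 4 * kQuarterSize;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kQuarterSize));
  for (int offset = 0; offset < kSize; offset += kQuarterSize) {
    // Each chunk must read fully and match the originally written data.
    EXPECT_EQ(kQuarterSize,
              ReadData(entry, 1, offset, read_buffer.get(), kQuarterSize));
    EXPECT_EQ(0, memcmp(buffer->data() + offset, read_buffer->data(),
                        kQuarterSize));
  }
  entry->Close();
}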
3432 
3433 // Tests that we can write the data out of sequence and read it back
3434 // correctly. In this case the CRC will not be present.
3435 TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
3436   // Test sequence:
3437   // Create, Write (second half of data), Write (first half of data), Read,
3438   // Close.
3439   SetSimpleCacheMode();
3440   InitCache();
3441   disk_cache::Entry* null = NULL;
3442   const char key[] = "the first key";
3443 
3444   const int kHalfSize = 200;
3445   const int kSize = 2 * kHalfSize;
3446   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3447   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3448   CacheTestFillBuffer(buffer1->data(), kSize, false);
3449   char* buffer1_data = buffer1->data() + kHalfSize;
3450   memcpy(buffer2->data(), buffer1_data, kHalfSize);
3451   disk_cache::Entry* entry = NULL;
3452 
3453   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3454   EXPECT_NE(null, entry);
3455 
3456   int offset = kHalfSize;
3457   int buf_len = kHalfSize;
3458 
3459   EXPECT_EQ(buf_len,
3460             WriteData(entry, 0, offset, buffer2.get(), buf_len, false));
3461   offset = 0;
3462   buf_len = kHalfSize;
3463   EXPECT_EQ(buf_len,
3464             WriteData(entry, 0, offset, buffer1.get(), buf_len, false));
3465   entry->Close();
3466 
3467   disk_cache::Entry* entry2 = NULL;
3468   ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
3469   EXPECT_EQ(entry, entry2);
3470 
3471   scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
3472   EXPECT_EQ(kSize, ReadData(entry2, 0, 0, buffer1_read1.get(), kSize));
3473   EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
3474 
3475   // Check that we are not leaking.
3476   ASSERT_NE(entry, null);
3477   EXPECT_TRUE(
3478       static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3479   entry->Close();
3480   entry = NULL;
3481 }
3482 
3483 // Test that changing stream1 size does not affect stream0 (stream0 and stream1
3484 // are stored in the same file in Simple Cache).
3485 TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
3486   SetSimpleCacheMode();
3487   InitCache();
3488   disk_cache::Entry* entry = NULL;
3489   const char key[] = "the key";
3490   const int kSize = 100;
3491   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3492   scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize));
3493   CacheTestFillBuffer(buffer->data(), kSize, false);
3494 
3495   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3496   EXPECT_TRUE(entry);
3497 
3498   // Write something into stream0.
3499   EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
3500   EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3501   EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3502   entry->Close();
3503 
3504   // Extend stream1.
3505   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3506   int stream1_size = 100;
3507   EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
3508   EXPECT_EQ(stream1_size, entry->GetDataSize(1));
3509   entry->Close();
3510 
3511   // Check that stream0 data has not been modified and that the EOF record for
3512   // stream 0 contains a CRC.
3513   // The entry needs to be reopened before checking the CRC: Open synchronizes
3514   // with the previous Close. This ensures the EOF records have been written to
3515   // disk before we attempt to read them independently.
3516   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3517   base::FilePath entry_file0_path = cache_path_.AppendASCII(
3518       disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3519   int flags = base::PLATFORM_FILE_READ | base::PLATFORM_FILE_OPEN;
3520   base::PlatformFile entry_file0 =
3521       base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL);
3522   ASSERT_TRUE(entry_file0 != base::kInvalidPlatformFileValue);
3523 
3524   int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
3525   int sparse_data_size = 0;
3526   disk_cache::SimpleEntryStat entry_stat(
3527       base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
3528   int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
3529   disk_cache::SimpleFileEOF eof_record;
3530   ASSERT_EQ(static_cast<int>(sizeof(eof_record)), base::ReadPlatformFile(
3531       entry_file0,
3532       eof_offset,
3533       reinterpret_cast<char*>(&eof_record),
3534       sizeof(eof_record)));
3535   EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
3536   EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
3537               disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);
3538 
3539   buffer_read = new net::IOBuffer(kSize);
3540   EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3541   EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3542 
3543   // Shrink stream1.
3544   stream1_size = 50;
3545   EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
3546   EXPECT_EQ(stream1_size, entry->GetDataSize(1));
3547   entry->Close();
3548 
3549   // Check that stream0 data has not been modified.
3550   buffer_read = new net::IOBuffer(kSize);
3551   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3552   EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3553   EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3554   entry->Close();
3555   entry = NULL;
3556 }
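
// A tiny sketch (invented test name) restating the file layout the test above
// depends on, using only simple_util helpers that already appear in this
// file: streams 0 and 1 share the first entry file, while stream 2 gets a
// file of its own. The assumption here is that GetFileIndexFromStreamIndex()
// accepts any valid stream index.
TEST_F(DiskCacheEntryTest, SimpleCacheStreamToFileMappingSketch) {
  SetSimpleCacheMode();
  InitCache();

  // Streams 0 and 1 map to the same backing file...
  EXPECT_EQ(disk_cache::simple_util::GetFileIndexFromStreamIndex(0),
            disk_cache::simple_util::GetFileIndexFromStreamIndex(1));
  // ...while stream 2 maps to a different one, which is why resizing stream 1
  // must never touch stream 0, yet stream 2 can be omitted independently.
  EXPECT_NE(disk_cache::simple_util::GetFileIndexFromStreamIndex(0),
            disk_cache::simple_util::GetFileIndexFromStreamIndex(2));
}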
3557 
3558 // Test that writing within the range for which the crc has already been
3559 // computed will properly invalidate the computed crc.
3560 TEST_F(DiskCacheEntryTest, SimpleCacheCRCRewrite) {
3561   // Test sequence:
3562   // Create, Write (big data), Write (small data in the middle), Close.
3563   // Open, Read (all), Close.
3564   SetSimpleCacheMode();
3565   InitCache();
3566   disk_cache::Entry* null = NULL;
3567   const char key[] = "the first key";
3568 
3569   const int kHalfSize = 200;
3570   const int kSize = 2 * kHalfSize;
3571   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3572   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kHalfSize));
3573   CacheTestFillBuffer(buffer1->data(), kSize, false);
3574   CacheTestFillBuffer(buffer2->data(), kHalfSize, false);
3575 
3576   disk_cache::Entry* entry = NULL;
3577   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3578   EXPECT_NE(null, entry);
3579   entry->Close();
3580 
3581   for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3582     ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3583     int offset = 0;
3584     int buf_len = kSize;
3585 
3586     EXPECT_EQ(buf_len,
3587               WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3588     offset = kHalfSize;
3589     buf_len = kHalfSize;
3590     EXPECT_EQ(buf_len,
3591               WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3592     entry->Close();
3593 
3594     ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3595 
3596     scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
3597     EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
3598     EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
3599     EXPECT_EQ(
3600         0,
3601         memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize, kHalfSize));
3602 
3603     entry->Close();
3604   }
3605 }
3606 
3607 bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key) {
3608   int third_stream_file_index =
3609       disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
3610   base::FilePath third_stream_file_path = cache_path_.AppendASCII(
3611       disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
3612           key, third_stream_file_index));
3613   return PathExists(third_stream_file_path);
3614 }
3615 
3616 void DiskCacheEntryTest::SyncDoomEntry(const char* key) {
3617   net::TestCompletionCallback callback;
3618   cache_->DoomEntry(key, callback.callback());
3619   callback.WaitForResult();
3620 }
3621 
3622 // Check that a newly-created entry with no third-stream writes omits the
3623 // third stream file.
3624 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
3625   SetSimpleCacheMode();
3626   InitCache();
3627 
3628   const char key[] = "key";
3629 
3630   disk_cache::Entry* entry;
3631 
3632   // Create entry and close without writing: third stream file should be
3633   // omitted, since the stream is empty.
3634   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3635   entry->Close();
3636   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3637 
3638   SyncDoomEntry(key);
3639   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3640 }
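
// A usage sketch (invented test name) for the path computation that
// SimpleCacheThirdStreamFileExists() above performs: the third stream's file
// index and filename are derived from the key via simple_util. Before any
// entry is created for the key, that file cannot exist.
TEST_F(DiskCacheEntryTest, SimpleCacheThirdStreamPathSketch) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "key";
  int file_index = disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
  base::FilePath path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, file_index));

  // Nothing has been written for |key| yet, so neither the direct check nor
  // the helper should find a third-stream file.
  EXPECT_FALSE(PathExists(path));
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}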
3641 
3642 // Check that a newly-created entry with only a single zero-offset, zero-length
3643 // write omits the third stream file.
3644 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
3645   SetSimpleCacheMode();
3646   InitCache();
3647 
3648   const int kHalfSize = 8;
3649   const int kSize = kHalfSize * 2;
3650   const char key[] = "key";
3651   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3652   CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3653 
3654   disk_cache::Entry* entry;
3655 
3656   // Create entry, write empty buffer to third stream, and close: third stream
3657   // should still be omitted, since the entry ignores writes that don't modify
3658   // data or change the length.
3659   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3660   EXPECT_EQ(0, WriteData(entry, 2, 0, buffer, 0, true));
3661   entry->Close();
3662   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3663 
3664   SyncDoomEntry(key);
3665   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3666 }
3667 
3668 // Check that we can read back data written to the third stream.
3669 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
3670   SetSimpleCacheMode();
3671   InitCache();
3672 
3673   const int kHalfSize = 8;
3674   const int kSize = kHalfSize * 2;
3675   const char key[] = "key";
3676   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3677   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3678   CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
3679 
3680   disk_cache::Entry* entry;
3681 
3682   // Create entry, write data to third stream, and close: third stream should
3683   // not be omitted, since it contains data.  Re-open entry and ensure there
3684   // are that many bytes in the third stream.
3685   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3686   EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true));
3687   entry->Close();
3688   EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3689 
3690   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3691   EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2, kSize));
3692   EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
3693   entry->Close();
3694   EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3695 
3696   SyncDoomEntry(key);
3697   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3698 }
3699 
3700 // Check that we remove the third stream file upon opening an entry and finding
3701 // the third stream empty.  (This is the upgrade path for entries written
3702 // before the third stream was optional.)
3703 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
3704   SetSimpleCacheMode();
3705   InitCache();
3706 
3707   const int kHalfSize = 8;
3708   const int kSize = kHalfSize * 2;
3709   const char key[] = "key";
3710   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3711   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3712   CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
3713 
3714   disk_cache::Entry* entry;
3715 
3716   // Create entry, write data to third stream, truncate third stream back to
3717   // empty, and close: the third stream will not initially be omitted, since
3718   // the entry creates the file on the first significant write, and only
3719   // removes it on open if it is empty.  Reopen, ensure that the file is
3720   // deleted, and that there's no data in the third stream.
3721   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3722   EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true));
3723   EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1, 0, true));
3724   entry->Close();
3725   EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3726 
3727   ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3728   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3729   EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2, kSize));
3730   entry->Close();
3731   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3732 
3733   SyncDoomEntry(key);
3734   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3735 }
3736 
3737 // Check that we don't accidentally create the third stream file once the entry
3738 // has been doomed.
3739 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
3740   SetSimpleCacheMode();
3741   InitCache();
3742 
3743   const int kHalfSize = 8;
3744   const int kSize = kHalfSize * 2;
3745   const char key[] = "key";
3746   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3747   CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3748 
3749   disk_cache::Entry* entry;
3750 
3751   // Create entry, doom entry, write data to third stream, and close: third
3752   // stream should not exist.  (Note: We don't care if the write fails, just
3753   // that it doesn't cause the file to be created on disk.)
3754   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3755   entry->Doom();
3756   WriteData(entry, 2, 0, buffer, kHalfSize, true);
3757   entry->Close();
3758   EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3759 }
3760 
3761 // There could be a race between Doom and an optimistic write.
3762 TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
3763   // Test sequence:
3764   // Create, first Write, second Write, Close.
3765   // Open, Close.
3766   SetSimpleCacheMode();
3767   InitCache();
3768   disk_cache::Entry* null = NULL;
3769   const char key[] = "the first key";
3770 
3771   const int kSize = 200;
3772   scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3773   scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3774   CacheTestFillBuffer(buffer1->data(), kSize, false);
3775   CacheTestFillBuffer(buffer2->data(), kSize, false);
3776 
3777   // The race only happens on stream 1 and stream 2.
3778   for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3779     ASSERT_EQ(net::OK, DoomAllEntries());
3780     disk_cache::Entry* entry = NULL;
3781 
3782     ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3783     EXPECT_NE(null, entry);
3784     entry->Close();
3785     entry = NULL;
3786 
3787     ASSERT_EQ(net::OK, DoomAllEntries());
3788     ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3789     EXPECT_NE(null, entry);
3790 
3791     int offset = 0;
3792     int buf_len = kSize;
3793     // This write should not be optimistic (since create is).
3794     EXPECT_EQ(buf_len,
3795               WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3796 
3797     offset = kSize;
3798     // This write should be optimistic.
3799     EXPECT_EQ(buf_len,
3800               WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3801     entry->Close();
3802 
3803     ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3804     EXPECT_NE(null, entry);
3805 
3806     entry->Close();
3807     entry = NULL;
3808   }
3809 }
3810 
3811 TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
3812   SetSimpleCacheMode();
3813   InitCache();
3814   BasicSparseIO();
3815 }
3816 
3817 TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
3818   SetSimpleCacheMode();
3819   InitCache();
3820   HugeSparseIO();
3821 }
3822 
3823 TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
3824   SetSimpleCacheMode();
3825   InitCache();
3826   GetAvailableRange();
3827 }
3828 
3829 TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCouldBeSparse) {
3830   SetSimpleCacheMode();
3831   InitCache();
3832   CouldBeSparse();
3833 }
3834 
3835 TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
3836   SetSimpleCacheMode();
3837   InitCache();
3838   UpdateSparseEntry();
3839 }
3840 
3841 TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
3842   SetSimpleCacheMode();
3843   InitCache();
3844   DoomSparseEntry();
3845 }
3846 
3847 TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
3848   SetSimpleCacheMode();
3849   InitCache();
3850   PartialSparseEntry();
3851 }
3852 
3853 TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
3854   const int kSize = 1024;
3855 
3856   SetSimpleCacheMode();
3857   // An entry is allowed sparse data 1/10 the size of the cache, so this size
3858   // allows for one |kSize|-sized range plus overhead, but not two ranges.
3859   SetMaxSize(kSize * 15);
3860   InitCache();
3861 
3862   const char key[] = "key";
3863   disk_cache::Entry* null = NULL;
3864   disk_cache::Entry* entry;
3865   ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3866   EXPECT_NE(null, entry);
3867 
3868   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3869   CacheTestFillBuffer(buffer->data(), kSize, false);
3870   net::TestCompletionCallback callback;
3871   int ret;
3872 
3873   // Verify initial conditions.
3874   ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
3875   EXPECT_EQ(0, callback.GetResult(ret));
3876 
3877   ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
3878   EXPECT_EQ(0, callback.GetResult(ret));
3879 
3880   // Write a range and make sure it reads back.
3881   ret = entry->WriteSparseData(0, buffer, kSize, callback.callback());
3882   EXPECT_EQ(kSize, callback.GetResult(ret));
3883 
3884   ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
3885   EXPECT_EQ(kSize, callback.GetResult(ret));
3886 
3887   // Write another range and make sure it reads back.
3888   ret = entry->WriteSparseData(kSize, buffer, kSize, callback.callback());
3889   EXPECT_EQ(kSize, callback.GetResult(ret));
3890 
3891   ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
3892   EXPECT_EQ(kSize, callback.GetResult(ret));
3893 
3894   // Make sure the first range was removed when the second was written.
3895   ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
3896   EXPECT_EQ(0, callback.GetResult(ret));
3897 
3898   entry->Close();
3899 }
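
// The sizing argument above, worked through as a sketch (constants mirror the
// test; the 1/10-of-the-cache per-entry sparse budget is the property the
// test relies on): a 15 KiB cache gives each entry roughly 1.5 KiB of sparse
// data, enough for one 1 KiB range plus bookkeeping but not for two, so
// writing the second range has to drop the first.
namespace {
const int kSparseSketchRangeSize = 1024;
const int kSparseSketchMaxSize = kSparseSketchRangeSize * 15;
const int kSparseSketchPerEntryBudget = kSparseSketchMaxSize / 10;
COMPILE_ASSERT(kSparseSketchPerEntryBudget >= kSparseSketchRangeSize,
               one_sparse_range_fits_in_the_budget);
COMPILE_ASSERT(kSparseSketchPerEntryBudget < 2 * kSparseSketchRangeSize,
               two_sparse_ranges_do_not_fit_in_the_budget);
}  // namespace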
3900 
3901 #endif  // defined(OS_POSIX)
3902