// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/basictypes.h"
#include "base/platform_thread.h"
#include "base/timer.h"
#include "base/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/mem_entry_impl.h"
#include "testing/gtest/include/gtest/gtest.h"

using base::Time;

extern volatile int g_cache_tests_received;
extern volatile bool g_cache_tests_error;

// Tests that can run with different types of caches.
class DiskCacheEntryTest : public DiskCacheTestWithCache {
 protected:
  void InternalSyncIO();
  void InternalAsyncIO();
  void ExternalSyncIO();
  void ExternalAsyncIO();
  void StreamAccess();
  void GetKey();
  void GrowData();
  void TruncateData();
  void ZeroLengthIO();
  void ReuseEntry(int size);
  void InvalidData();
  void DoomEntry();
  void DoomedEntry();
  void BasicSparseIO(bool async);
  void HugeSparseIO(bool async);
  void GetAvailableRange();
  void DoomSparseEntry();
  void PartialSparseEntry();
};

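// Performs synchronous reads and writes on the first two streams of a single
// entry, using buffers small enough to stay in internal block files, and
// verifies the reported sizes and the data read back.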
void DiskCacheEntryTest::InternalSyncIO() {
  disk_cache::Entry *entry1 = NULL;
  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);

  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  EXPECT_EQ(0, entry1->ReadData(0, 0, buffer1, kSize1, NULL));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(10, entry1->WriteData(0, 0, buffer1, kSize1, NULL, false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(10, entry1->ReadData(0, 0, buffer1, kSize1, NULL));
  EXPECT_STREQ("the data", buffer1->data());

  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
  scoped_refptr<net::IOBuffer> buffer3 = new net::IOBuffer(kSize3);
  memset(buffer3->data(), 0, kSize3);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  EXPECT_EQ(5000, entry1->WriteData(1, 1500, buffer2, kSize2, NULL, false));
  memset(buffer2->data(), 0, kSize2);
  EXPECT_EQ(4989, entry1->ReadData(1, 1511, buffer2, kSize2, NULL));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(5000, entry1->ReadData(1, 0, buffer2, kSize2, NULL));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  EXPECT_EQ(1500, entry1->ReadData(1, 5000, buffer2, kSize2, NULL));

  EXPECT_EQ(0, entry1->ReadData(1, 6500, buffer2, kSize2, NULL));
  EXPECT_EQ(6500, entry1->ReadData(1, 0, buffer3, kSize3, NULL));
  EXPECT_EQ(8192, entry1->WriteData(1, 0, buffer3, 8192, NULL, false));
  EXPECT_EQ(8192, entry1->ReadData(1, 0, buffer3, kSize3, NULL));
  EXPECT_EQ(8192, entry1->GetDataSize(1));

  entry1->Doom();
  entry1->Close();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, InternalSyncIO) {
  InitCache();
  InternalSyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalSyncIO();
}

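// Same coverage as InternalSyncIO, but every operation supplies a completion
// callback so both immediate and ERR_IO_PENDING return paths are exercised,
// and the number of callbacks received is checked at the end.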
void DiskCacheEntryTest::InternalAsyncIO() {
  disk_cache::Entry *entry1 = NULL;
  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);

  // Avoid using internal buffers for the test. We have to write something to
  // the entry and close it so that we flush the internal buffer to disk. After
  // that, IO operations will be really hitting the disk. We don't care about
  // the content, so just extending the entry is enough (all extensions zero-
  // fill any holes).
  EXPECT_EQ(0, entry1->WriteData(0, 15 * 1024, NULL, 0, NULL, false));
  EXPECT_EQ(0, entry1->WriteData(1, 15 * 1024, NULL, 0, NULL, false));
  entry1->Close();
  ASSERT_TRUE(cache_->OpenEntry("the first key", &entry1));

  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(false);
  CallbackTest callback2(false);
  CallbackTest callback3(false);
  CallbackTest callback4(false);
  CallbackTest callback5(false);
  CallbackTest callback6(false);
  CallbackTest callback7(false);
  CallbackTest callback8(false);
  CallbackTest callback9(false);
  CallbackTest callback10(false);
  CallbackTest callback11(false);
  CallbackTest callback12(false);
  CallbackTest callback13(false);

  g_cache_tests_error = false;
  g_cache_tests_received = 0;

  MessageLoopHelper helper;

  const int kSize1 = 10;
  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
  scoped_refptr<net::IOBuffer> buffer3 = new net::IOBuffer(kSize3);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);

  EXPECT_EQ(0, entry1->ReadData(0, 15 * 1024, buffer1, kSize1, &callback1));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  int expected = 0;
  int ret = entry1->WriteData(0, 0, buffer1, kSize1, &callback2, false);
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer2->data(), 0, kSize2);
  ret = entry1->ReadData(0, 0, buffer2, kSize1, &callback3);
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry1->WriteData(1, 1500, buffer2, kSize2, &callback4, true);
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  ret = entry1->ReadData(1, 1511, buffer3, kSize2, &callback5);
  EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry1->ReadData(1, 0, buffer2, kSize2, &callback6);
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  memset(buffer3->data(), 0, kSize3);

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  ret = entry1->ReadData(1, 5000, buffer2, kSize2, &callback7);
  EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry1->ReadData(1, 0, buffer3, kSize3, &callback9);
  EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry1->WriteData(1, 0, buffer3, 8192, &callback10, true);
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  ret = entry1->ReadData(1, 0, buffer3, kSize3, &callback11);
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_EQ(8192, entry1->GetDataSize(1));

  ret = entry1->ReadData(0, 0, buffer1, kSize1, &callback12);
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry1->ReadData(1, 0, buffer2, kSize2, &callback13);
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  EXPECT_FALSE(g_cache_tests_error);
  EXPECT_EQ(expected, g_cache_tests_received);

  entry1->Doom();
  entry1->Close();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
  InitCache();
  InternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalAsyncIO();
}

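// Same as InternalSyncIO but with buffers large enough (17,000 and 25,000
// bytes) to push the data out of block files and into external files.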
void DiskCacheEntryTest::ExternalSyncIO() {
  disk_cache::Entry *entry1;
  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));

  const int kSize1 = 17000;
  const int kSize2 = 25000;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(17000, entry1->WriteData(0, 0, buffer1, kSize1, NULL, false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(17000, entry1->ReadData(0, 0, buffer1, kSize1, NULL));
  EXPECT_STREQ("the data", buffer1->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  EXPECT_EQ(25000, entry1->WriteData(1, 10000, buffer2, kSize2, NULL, false));
  memset(buffer2->data(), 0, kSize2);
  EXPECT_EQ(24989, entry1->ReadData(1, 10011, buffer2, kSize2, NULL));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(25000, entry1->ReadData(1, 0, buffer2, kSize2, NULL));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer2->data(), 10000));
  EXPECT_EQ(5000, entry1->ReadData(1, 30000, buffer2, kSize2, NULL));

  EXPECT_EQ(0, entry1->ReadData(1, 35000, buffer2, kSize2, NULL));
  EXPECT_EQ(17000, entry1->ReadData(1, 0, buffer1, kSize1, NULL));
  EXPECT_EQ(17000, entry1->WriteData(1, 20000, buffer1, kSize1, NULL, false));
  EXPECT_EQ(37000, entry1->GetDataSize(1));

  entry1->Doom();
  entry1->Close();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
  InitCache();
  ExternalSyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalSyncIO();
}

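// Asynchronous version of ExternalSyncIO: large buffers with completion
// callbacks, accepting either an immediate result or ERR_IO_PENDING, and
// verifying the callback count at the end.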
void DiskCacheEntryTest::ExternalAsyncIO() {
  disk_cache::Entry *entry1;
  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));

  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(false);
  CallbackTest callback2(false);
  CallbackTest callback3(false);
  CallbackTest callback4(false);
  CallbackTest callback5(false);
  CallbackTest callback6(false);
  CallbackTest callback7(false);
  CallbackTest callback8(false);
  CallbackTest callback9(false);

  g_cache_tests_error = false;
  g_cache_tests_received = 0;
  int expected = 0;

  MessageLoopHelper helper;

  const int kSize1 = 17000;
  const int kSize2 = 25000;
  const int kSize3 = 25000;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
  scoped_refptr<net::IOBuffer> buffer3 = new net::IOBuffer(kSize3);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  int ret = entry1->WriteData(0, 0, buffer1, kSize1, &callback1, false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer2->data(), 0, kSize1);
  ret = entry1->ReadData(0, 0, buffer2, kSize1, &callback2);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer1->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry1->WriteData(1, 10000, buffer2, kSize2, &callback3, false);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer3->data(), 0, kSize3);
  ret = entry1->ReadData(1, 10011, buffer3, kSize3, &callback4);
  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry1->ReadData(1, 0, buffer2, kSize2, &callback5);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer2->data(), 10000));
  ret = entry1->ReadData(1, 30000, buffer2, kSize2, &callback6);
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_EQ(0, entry1->ReadData(1, 35000, buffer2, kSize2, &callback7));
  ret = entry1->ReadData(1, 0, buffer1, kSize1, &callback8);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  ret = entry1->WriteData(1, 20000, buffer1, kSize1, &callback9, false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_EQ(37000, entry1->GetDataSize(1));

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  EXPECT_FALSE(g_cache_tests_error);
  EXPECT_EQ(expected, g_cache_tests_received);

  entry1->Doom();
  entry1->Close();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
  InitCache();
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalAsyncIO();
}

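// Writes and reads each of the entry's data streams independently and checks
// that accessing a stream index past the last one fails.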
void DiskCacheEntryTest::StreamAccess() {
  disk_cache::Entry *entry = NULL;
  ASSERT_TRUE(cache_->CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kBufferSize);
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kBufferSize);

  const int kNumStreams = 3;
  for (int i = 0; i < kNumStreams; i++) {
    CacheTestFillBuffer(buffer1->data(), kBufferSize, false);
    EXPECT_EQ(kBufferSize, entry->WriteData(i, 0, buffer1, kBufferSize, NULL,
                                            false));
    memset(buffer2->data(), 0, kBufferSize);
    EXPECT_EQ(kBufferSize, entry->ReadData(i, 0, buffer2, kBufferSize, NULL));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kBufferSize));
  }

  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            entry->ReadData(kNumStreams, 0, buffer1, kBufferSize, NULL));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, StreamAccess) {
  InitCache();
  StreamAccess();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
  SetMemoryOnlyMode();
  InitCache();
  StreamAccess();
}

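// Verifies that GetKey() returns the original key for a short key as well as
// for random keys of 1,000, 3,000 and roughly 20,000 bytes.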
void DiskCacheEntryTest::GetKey() {
  std::string key1("the first key");
  disk_cache::Entry *entry1;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
  EXPECT_EQ(key1, entry1->GetKey()) << "short key";
  entry1->Close();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);
  char key_buffer[20000];

  CacheTestFillBuffer(key_buffer, 3000, true);
  key_buffer[1000] = '\0';

  key1 = key_buffer;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
  EXPECT_TRUE(key1 == entry1->GetKey()) << "1000 bytes key";
  entry1->Close();

  key_buffer[1000] = 'p';
  key_buffer[3000] = '\0';
  key1 = key_buffer;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
  EXPECT_TRUE(key1 == entry1->GetKey()) << "medium size key";
  entry1->Close();

  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
  key_buffer[19999] = '\0';

  key1 = key_buffer;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
  EXPECT_TRUE(key1 == entry1->GetKey()) << "long key";
  entry1->Close();
}

TEST_F(DiskCacheEntryTest, GetKey) {
  InitCache();
  GetKey();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
  SetMemoryOnlyMode();
  InitCache();
  GetKey();
}

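// Grows the data of a stream in steps (10, 2,000 and 20,000 bytes) so that
// the storage moves from an internal address to a bigger block size and
// finally to an external file, checking sizes and contents at each step.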
void DiskCacheEntryTest::GrowData() {
  std::string key1("the first key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize);
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  memset(buffer2->data(), 0, kSize);

  base::strlcpy(buffer1->data(), "the data", kSize);
  EXPECT_EQ(10, entry1->WriteData(0, 0, buffer1, 10, NULL, false));
  EXPECT_EQ(10, entry1->ReadData(0, 0, buffer2, 10, NULL));
  EXPECT_STREQ("the data", buffer2->data());
  EXPECT_EQ(10, entry1->GetDataSize(0));

  EXPECT_EQ(2000, entry1->WriteData(0, 0, buffer1, 2000, NULL, false));
  EXPECT_EQ(2000, entry1->GetDataSize(0));
  EXPECT_EQ(2000, entry1->ReadData(0, 0, buffer2, 2000, NULL));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));

  EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, kSize, NULL, false));
  EXPECT_EQ(20000, entry1->GetDataSize(0));
  EXPECT_EQ(20000, entry1->ReadData(0, 0, buffer2, kSize, NULL));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry1->Close();

  memset(buffer2->data(), 0, kSize);
  ASSERT_TRUE(cache_->CreateEntry("Second key", &entry2));
  EXPECT_EQ(10, entry2->WriteData(0, 0, buffer1, 10, NULL, false));
  EXPECT_EQ(10, entry2->GetDataSize(0));
  entry2->Close();

  // Go from an internal address to a bigger block size.
  ASSERT_TRUE(cache_->OpenEntry("Second key", &entry2));
  EXPECT_EQ(2000, entry2->WriteData(0, 0, buffer1, 2000, NULL, false));
  EXPECT_EQ(2000, entry2->GetDataSize(0));
  EXPECT_EQ(2000, entry2->ReadData(0, 0, buffer2, 2000, NULL));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
  entry2->Close();
  memset(buffer2->data(), 0, kSize);

  // Go from an internal address to an external one.
  ASSERT_TRUE(cache_->OpenEntry("Second key", &entry2));
  EXPECT_EQ(20000, entry2->WriteData(0, 0, buffer1, kSize, NULL, false));
  EXPECT_EQ(20000, entry2->GetDataSize(0));
  EXPECT_EQ(20000, entry2->ReadData(0, 0, buffer2, kSize, NULL));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry2->Close();
}

TEST_F(DiskCacheEntryTest, GrowData) {
  InitCache();
  GrowData();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
  SetMemoryOnlyMode();
  InitCache();
  GrowData();
}

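// Shrinks the data of a stream with and without the truncate flag, moving
// between internal blocks, external files and zero length, and checks that
// the reported size and the preserved data are correct.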
void DiskCacheEntryTest::TruncateData() {
  std::string key1("the first key");
  disk_cache::Entry *entry1;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple truncation:
  EXPECT_EQ(200, entry1->WriteData(0, 0, buffer1, 200, NULL, false));
  EXPECT_EQ(200, entry1->GetDataSize(0));
  EXPECT_EQ(100, entry1->WriteData(0, 0, buffer1, 100, NULL, false));
  EXPECT_EQ(200, entry1->GetDataSize(0));
  EXPECT_EQ(100, entry1->WriteData(0, 0, buffer1, 100, NULL, true));
  EXPECT_EQ(100, entry1->GetDataSize(0));
  EXPECT_EQ(0, entry1->WriteData(0, 50, buffer1, 0, NULL, true));
  EXPECT_EQ(50, entry1->GetDataSize(0));
  EXPECT_EQ(0, entry1->WriteData(0, 0, buffer1, 0, NULL, true));
  EXPECT_EQ(0, entry1->GetDataSize(0));
  entry1->Close();
  ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));

  // Go to an external file.
  EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, 20000, NULL, true));
  EXPECT_EQ(20000, entry1->GetDataSize(0));
  EXPECT_EQ(20000, entry1->ReadData(0, 0, buffer2, 20000, NULL));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
  memset(buffer2->data(), 0, kSize2);

  // External file truncation
  EXPECT_EQ(18000, entry1->WriteData(0, 0, buffer1, 18000, NULL, false));
  EXPECT_EQ(20000, entry1->GetDataSize(0));
  EXPECT_EQ(18000, entry1->WriteData(0, 0, buffer1, 18000, NULL, true));
  EXPECT_EQ(18000, entry1->GetDataSize(0));
  EXPECT_EQ(0, entry1->WriteData(0, 17500, buffer1, 0, NULL, true));
  EXPECT_EQ(17500, entry1->GetDataSize(0));

  // And back to an internal block.
  EXPECT_EQ(600, entry1->WriteData(0, 1000, buffer1, 600, NULL, true));
  EXPECT_EQ(1600, entry1->GetDataSize(0));
  EXPECT_EQ(600, entry1->ReadData(0, 1000, buffer2, 600, NULL));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
  EXPECT_EQ(1000, entry1->ReadData(0, 0, buffer2, 1000, NULL));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000)) <<
      "Preserves previous data";

  // Go from external file to zero length.
  EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, 20000, NULL, true));
  EXPECT_EQ(20000, entry1->GetDataSize(0));
  EXPECT_EQ(0, entry1->WriteData(0, 0, buffer1, 0, NULL, true));
  EXPECT_EQ(0, entry1->GetDataSize(0));

  entry1->Close();
}

TEST_F(DiskCacheEntryTest, TruncateData) {
  InitCache();
  TruncateData();

  // We generate asynchronous IO that is not really tracked until completion
  // so we just wait here before running the next test.
  MessageLoopHelper helper;
  helper.WaitUntilCacheIoFinished(1);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
  SetMemoryOnlyMode();
  InitCache();
  TruncateData();
}

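// Checks that zero-length reads and writes succeed, and that a zero-length
// write past the current end extends the entry.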
void DiskCacheEntryTest::ZeroLengthIO() {
  std::string key1("the first key");
  disk_cache::Entry *entry1;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));

  EXPECT_EQ(0, entry1->ReadData(0, 0, NULL, 0, NULL));
  EXPECT_EQ(0, entry1->WriteData(0, 0, NULL, 0, NULL, false));

  // This write should extend the entry.
  EXPECT_EQ(0, entry1->WriteData(0, 1000, NULL, 0, NULL, false));
  EXPECT_EQ(0, entry1->ReadData(0, 500, NULL, 0, NULL));
  EXPECT_EQ(0, entry1->ReadData(0, 2000, NULL, 0, NULL));
  EXPECT_EQ(1000, entry1->GetDataSize(0));
  entry1->Close();
}

TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
  InitCache();
  ZeroLengthIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
  SetMemoryOnlyMode();
  InitCache();
  ZeroLengthIO();
}

// Write more than the total cache capacity but to a single entry. |size| is
// the number of bytes to write each time.
void DiskCacheEntryTest::ReuseEntry(int size) {
  std::string key1("the first key");
  disk_cache::Entry *entry;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry));

  entry->Close();
  std::string key2("the second key");
  ASSERT_TRUE(cache_->CreateEntry(key2, &entry));

  scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(size);
  CacheTestFillBuffer(buffer->data(), size, false);

  for (int i = 0; i < 15; i++) {
    EXPECT_EQ(0, entry->WriteData(0, 0, buffer, 0, NULL, true));
    EXPECT_EQ(size, entry->WriteData(0, 0, buffer, size, NULL, false));
    entry->Close();
    ASSERT_TRUE(cache_->OpenEntry(key2, &entry));
  }

  entry->Close();
  ASSERT_TRUE(cache_->OpenEntry(key1, &entry)) << "have not evicted this entry";
  entry->Close();
}

TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
  SetDirectMode();
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
  SetDirectMode();
  SetMemoryOnlyMode();
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024);
}

TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
  SetDirectMode();
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
  SetDirectMode();
  SetMemoryOnlyMode();
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024);
}

// Reading somewhere that was not written should return zeros.
void DiskCacheEntryTest::InvalidData() {
  std::string key1("the first key");
  disk_cache::Entry *entry1;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  const int kSize3 = 20000;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
  scoped_refptr<net::IOBuffer> buffer3 = new net::IOBuffer(kSize3);

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple data grow:
  EXPECT_EQ(200, entry1->WriteData(0, 400, buffer1, 200, NULL, false));
  EXPECT_EQ(600, entry1->GetDataSize(0));
  EXPECT_EQ(100, entry1->ReadData(0, 300, buffer3, 100, NULL));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry1->Close();
  ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));

  // The entry is now on disk. Load it and extend it.
  EXPECT_EQ(200, entry1->WriteData(0, 800, buffer1, 200, NULL, false));
  EXPECT_EQ(1000, entry1->GetDataSize(0));
  EXPECT_EQ(100, entry1->ReadData(0, 700, buffer3, 100, NULL));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry1->Close();
  ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));

  // This time using truncate.
  EXPECT_EQ(200, entry1->WriteData(0, 1800, buffer1, 200, NULL, true));
  EXPECT_EQ(2000, entry1->GetDataSize(0));
  EXPECT_EQ(100, entry1->ReadData(0, 1500, buffer3, 100, NULL));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));

  // Go to an external file.
  EXPECT_EQ(200, entry1->WriteData(0, 19800, buffer1, 200, NULL, false));
  EXPECT_EQ(20000, entry1->GetDataSize(0));
  EXPECT_EQ(4000, entry1->ReadData(0, 14000, buffer3, 4000, NULL));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));

  // And back to an internal block.
  EXPECT_EQ(600, entry1->WriteData(0, 1000, buffer1, 600, NULL, true));
  EXPECT_EQ(1600, entry1->GetDataSize(0));
  EXPECT_EQ(600, entry1->ReadData(0, 1000, buffer3, 600, NULL));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));

  // Extend it again.
  EXPECT_EQ(600, entry1->WriteData(0, 2000, buffer1, 600, NULL, false));
  EXPECT_EQ(2600, entry1->GetDataSize(0));
  EXPECT_EQ(200, entry1->ReadData(0, 1800, buffer3, 200, NULL));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  // And again (with truncation flag).
  EXPECT_EQ(600, entry1->WriteData(0, 3000, buffer1, 600, NULL, true));
  EXPECT_EQ(3600, entry1->GetDataSize(0));
  EXPECT_EQ(200, entry1->ReadData(0, 2800, buffer3, 200, NULL));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  entry1->Close();
}

TEST_F(DiskCacheEntryTest, InvalidData) {
  InitCache();
  InvalidData();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
  SetMemoryOnlyMode();
  InitCache();
  InvalidData();
}

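// Dooms a freshly created entry and then an entry with a very long key and
// data in both streams, and verifies that the cache ends up empty.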
void DiskCacheEntryTest::DoomEntry() {
  std::string key1("the first key");
  disk_cache::Entry *entry1;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
  entry1->Doom();
  entry1->Close();

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, true);
  buffer->data()[19999] = '\0';

  key1 = buffer->data();
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
  EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer, kSize, NULL, false));
  EXPECT_EQ(20000, entry1->WriteData(1, 0, buffer, kSize, NULL, false));
  entry1->Doom();
  entry1->Close();

  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, DoomEntry) {
  InitCache();
  DoomEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomEntry();
}

// Verify that basic operations work as expected with doomed entries.
void DiskCacheEntryTest::DoomedEntry() {
  std::string key("the first key");
  disk_cache::Entry *entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));
  entry->Doom();

  EXPECT_EQ(0, cache_->GetEntryCount());
  Time initial = Time::Now();
  PlatformThread::Sleep(20);

  const int kSize1 = 2000;
  const int kSize2 = 2000;
  scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  EXPECT_EQ(2000, entry->WriteData(0, 0, buffer1, 2000, NULL, false));
  EXPECT_EQ(2000, entry->ReadData(0, 0, buffer2, 2000, NULL));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
  EXPECT_EQ(key, entry->GetKey());
  EXPECT_TRUE(initial < entry->GetLastModified());
  EXPECT_TRUE(initial < entry->GetLastUsed());

  entry->Close();
}

TEST_F(DiskCacheEntryTest, DoomedEntry) {
  InitCache();
  DoomedEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomedEntry();
}

// Test that child entries in a memory cache backend are not visible from
// enumerations.
TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 4096;
  scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* parent_entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &parent_entry));

  // Writes to the parent entry.
  EXPECT_EQ(kSize, parent_entry->WriteSparseData(0, buf, kSize, NULL));

  // This write creates a child entry and writes to it.
  EXPECT_EQ(kSize, parent_entry->WriteSparseData(8192, buf, kSize, NULL));

  parent_entry->Close();

  // Perform the enumerations.
  void* iter = NULL;
  disk_cache::Entry* entry = NULL;
  int count = 0;
  while (cache_->OpenNextEntry(&iter, &entry)) {
    ASSERT_TRUE(entry != NULL);
    ++count;
    disk_cache::MemEntryImpl* mem_entry =
        reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
    EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
    mem_entry->Close();
  }
  EXPECT_EQ(1, count);
}

// Writes |buf_1| to offset and reads it back as |buf_2|.
void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
                    net::IOBuffer* buf_1, int size, bool async,
                    net::IOBuffer* buf_2) {
  TestCompletionCallback callback;
  TestCompletionCallback* cb = async ? &callback : NULL;

  memset(buf_2->data(), 0, size);
  int ret = entry->ReadSparseData(offset, buf_2, size, cb);
  ret = callback.GetResult(ret);
  EXPECT_EQ(0, ret);

  ret = entry->WriteSparseData(offset, buf_1, size, cb);
  ret = callback.GetResult(ret);
  EXPECT_EQ(size, ret);

  ret = entry->ReadSparseData(offset, buf_2, size, cb);
  ret = callback.GetResult(ret);
  EXPECT_EQ(size, ret);

  EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
}

// Reads |size| bytes from |entry| at |offset| and verifies that they are the
// same as the content of the provided |buffer|.
void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
                           int size, bool async) {
  TestCompletionCallback callback;
  TestCompletionCallback* cb = async ? &callback : NULL;

  scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(size);
  memset(buf_1->data(), 0, size);
  int ret = entry->ReadSparseData(offset, buf_1, size, cb);
  ret = callback.GetResult(ret);
  EXPECT_EQ(size, ret);

  EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
}

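// Writes and reads back sparse data at offsets 0, 4 MB and 32 GB, then
// reopens the entry and verifies the content again.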
void DiskCacheEntryTest::BasicSparseIO(bool async) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(kSize);
  scoped_refptr<net::IOBuffer> buf_2 = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry, 0, buf_1, kSize, async, buf_2);

  // Write at offset 0x400000 (4 MB).
  VerifySparseIO(entry, 0x400000, buf_1, kSize, async, buf_2);

  // Write at offset 0x800000000 (32 GB).
  VerifySparseIO(entry, 0x800000000LL, buf_1, kSize, async, buf_2);

  entry->Close();

  // Check everything again.
  ASSERT_TRUE(cache_->OpenEntry(key, &entry));
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize, async);
  VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize, async);
  VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize, async);
  entry->Close();
}

TEST_F(DiskCacheEntryTest, BasicSparseSyncIO) {
  InitCache();
  BasicSparseIO(false);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  BasicSparseIO(false);
}

TEST_F(DiskCacheEntryTest, BasicSparseAsyncIO) {
  InitCache();
  BasicSparseIO(true);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  BasicSparseIO(true);
}

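// Writes a single 1.2 MB sparse block (large enough to span several child
// entries) and verifies it both before and after reopening the entry.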
void DiskCacheEntryTest::HugeSparseIO(bool async) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));

  // Write 1.2 MB so that we cover multiple entries.
  const int kSize = 1200 * 1024;
  scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(kSize);
  scoped_refptr<net::IOBuffer> buf_2 = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  VerifySparseIO(entry, 0x20F0000, buf_1, kSize, async, buf_2);
  entry->Close();

  // Check it again.
  ASSERT_TRUE(cache_->OpenEntry(key, &entry));
  VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize, async);
  entry->Close();
}

TEST_F(DiskCacheEntryTest, HugeSparseSyncIO) {
  InitCache();
  HugeSparseIO(false);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  HugeSparseIO(false);
}

TEST_F(DiskCacheEntryTest, HugeSparseAsyncIO) {
  InitCache();
  HugeSparseIO(true);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  HugeSparseIO(true);
}

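// Writes two adjacent sparse ranges and checks that GetAvailableRange()
// reports the correct start offset and length for queries before, inside and
// after the written data.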
void DiskCacheEntryTest::GetAvailableRange() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
  EXPECT_EQ(kSize, entry->WriteSparseData(0x20F0000, buf, kSize, NULL));
  EXPECT_EQ(kSize, entry->WriteSparseData(0x20F4400, buf, kSize, NULL));

  // We stop at the first empty block.
  int64 start;
  EXPECT_EQ(kSize, entry->GetAvailableRange(0x20F0000, kSize * 2, &start));
  EXPECT_EQ(0x20F0000, start);

  start = 0;
  EXPECT_EQ(0, entry->GetAvailableRange(0, kSize, &start));
  EXPECT_EQ(0, entry->GetAvailableRange(0x20F0000 - kSize, kSize, &start));
  EXPECT_EQ(kSize, entry->GetAvailableRange(0, 0x2100000, &start));
  EXPECT_EQ(0x20F0000, start);

  // We should be able to Read based on the results of GetAvailableRange.
  start = -1;
  EXPECT_EQ(0, entry->GetAvailableRange(0x2100000, kSize, &start));
  EXPECT_EQ(0, entry->ReadSparseData(start, buf, kSize, NULL));

  start = 0;
  EXPECT_EQ(0x2000, entry->GetAvailableRange(0x20F2000, kSize, &start));
  EXPECT_EQ(0x20F2000, start);
  EXPECT_EQ(0x2000, entry->ReadSparseData(start, buf, kSize, NULL));

  // Make sure that we respect the |len| argument.
  start = 0;
  EXPECT_EQ(1, entry->GetAvailableRange(0x20F0001 - kSize, kSize, &start));
  EXPECT_EQ(0x20F0000, start);

  entry->Close();
}

TEST_F(DiskCacheEntryTest, GetAvailableRange) {
  InitCache();
  GetAvailableRange();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();
  GetAvailableRange();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(kSize);
  scoped_refptr<net::IOBuffer> buf_2 = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));

  // This loop writes back to back, starting at offsets 0 and 9000.
  for (int i = 0; i < kSize; i += 1024) {
    scoped_refptr<net::WrappedIOBuffer> buf_3 =
        new net::WrappedIOBuffer(buf_1->data() + i);
    VerifySparseIO(entry, i, buf_3, 1024, false, buf_2);
    VerifySparseIO(entry, 9000 + i, buf_3, 1024, false, buf_2);
  }

  // Make sure we have data written.
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize, false);
  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize, false);

  // This tests a large write that spans 3 entries from a misaligned offset.
  VerifySparseIO(entry, 20481, buf_1, 8192, false, buf_2);

  entry->Close();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  disk_cache::Entry* entry;
  std::string key("the first key");
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));

  // Writes in the middle of an entry.
  EXPECT_EQ(1024, entry->WriteSparseData(0, buf, 1024, NULL));
  EXPECT_EQ(1024, entry->WriteSparseData(5120, buf, 1024, NULL));
  EXPECT_EQ(1024, entry->WriteSparseData(10000, buf, 1024, NULL));

  // Writes in the middle of an entry and spans 2 child entries.
  EXPECT_EQ(8192, entry->WriteSparseData(50000, buf, 8192, NULL));

  int64 start;
  // Test that we stop at a discontinuous child at the second block.
  EXPECT_EQ(1024, entry->GetAvailableRange(0, 10000, &start));
  EXPECT_EQ(0, start);

  // Test that the number of bytes is reported correctly when we start from
  // the middle of a filled region.
  EXPECT_EQ(512, entry->GetAvailableRange(512, 10000, &start));
  EXPECT_EQ(512, start);

  // Test that we found bytes in the child of next block.
  EXPECT_EQ(1024, entry->GetAvailableRange(1024, 10000, &start));
  EXPECT_EQ(5120, start);

  // Test that the desired length is respected. It starts within a filled
  // region.
  EXPECT_EQ(512, entry->GetAvailableRange(5500, 512, &start));
  EXPECT_EQ(5500, start);

  // Test that the desired length is respected. It starts before a filled
  // region.
  EXPECT_EQ(500, entry->GetAvailableRange(5000, 620, &start));
  EXPECT_EQ(5120, start);

  // Test that multiple blocks are scanned.
  EXPECT_EQ(8192, entry->GetAvailableRange(40000, 20000, &start));
  EXPECT_EQ(50000, start);

  entry->Close();
}

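// Dooms one sparse entry while it is still open and another one after it has
// been fully saved, and checks that all parent and child entries go away.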
void DiskCacheEntryTest::DoomSparseEntry() {
  std::string key1("the first key");
  std::string key2("the second key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
  ASSERT_TRUE(cache_->CreateEntry(key2, &entry2));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, entry1->WriteSparseData(offset, buf, kSize, NULL));
    // Keep the second map under the default size.
    if (i < 9)
      EXPECT_EQ(kSize, entry2->WriteSparseData(offset, buf, kSize, NULL));
    offset *= 4;
  }

  if (memory_only_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(15, cache_->GetEntryCount());

  // Doom the first entry while it's still open.
  entry1->Doom();
  entry1->Close();
  entry2->Close();

  // Doom the second entry after it's fully saved.
  EXPECT_TRUE(cache_->DoomEntry(key2));

  // Make sure we do all needed work. This may fail for entry2 if, between
  // Close and DoomEntry, the system decides to remove all traces of the file
  // from the system cache, so we don't see that there is pending IO.
  MessageLoop::current()->RunAllPending();

  if (memory_only_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  } else {
    if (5 == cache_->GetEntryCount()) {
      // Most likely we are waiting for the result of reading the sparse info
      // (it's always async on Posix so it is easy to miss). Unfortunately we
      // don't have any signal to watch for so we can only wait.
      PlatformThread::Sleep(500);
      MessageLoop::current()->RunAllPending();
    }
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}

TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
  InitCache();
  DoomSparseEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomSparseEntry();
}

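// Performs sparse writes that are not aligned to the 1 KB block size used by
// sparse children and verifies the data, the available ranges and that later
// writes leave no holes.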
void DiskCacheEntryTest::PartialSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));

  // We should be able to deal with IO that is not aligned to the block size
  // of a sparse entry, at least to write a big range without leaving holes.
  const int kSize = 4 * 1024;
  const int kSmallSize = 128;
  scoped_refptr<net::IOBuffer> buf1 = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // The first write is just to extend the entry. The third write occupies
  // a 1KB block partially, it may not be written internally depending on the
  // implementation.
  EXPECT_EQ(kSize, entry->WriteSparseData(20000, buf1, kSize, NULL));
  EXPECT_EQ(kSize, entry->WriteSparseData(500, buf1, kSize, NULL));
  EXPECT_EQ(kSmallSize,
            entry->WriteSparseData(1080321, buf1, kSmallSize, NULL));
  entry->Close();
  ASSERT_TRUE(cache_->OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buf2 = new net::IOBuffer(kSize);
  memset(buf2->data(), 0, kSize);
  EXPECT_EQ(0, entry->ReadSparseData(8000, buf2, kSize, NULL));

  EXPECT_EQ(500, entry->ReadSparseData(kSize, buf2, kSize, NULL));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, entry->ReadSparseData(0, buf2, kSize, NULL));

  // This read should not change anything.
  EXPECT_EQ(96, entry->ReadSparseData(24000, buf2, kSize, NULL));
  EXPECT_EQ(500, entry->ReadSparseData(kSize, buf2, kSize, NULL));
  EXPECT_EQ(0, entry->ReadSparseData(499, buf2, kSize, NULL));

  int64 start;
  if (memory_only_) {
    EXPECT_EQ(100, entry->GetAvailableRange(0, 600, &start));
    EXPECT_EQ(500, start);
  } else {
    EXPECT_EQ(1024, entry->GetAvailableRange(0, 2048, &start));
    EXPECT_EQ(1024, start);
  }
  EXPECT_EQ(500, entry->GetAvailableRange(kSize, kSize, &start));
  EXPECT_EQ(kSize, start);
  EXPECT_EQ(3616, entry->GetAvailableRange(20 * 1024, 10000, &start));
  EXPECT_EQ(20 * 1024, start);

  // 1. Query before a filled 1KB block.
  // 2. Query within a filled 1KB block.
  // 3. Query beyond a filled 1KB block.
  if (memory_only_) {
    EXPECT_EQ(3496, entry->GetAvailableRange(19400, kSize, &start));
    EXPECT_EQ(20000, start);
  } else {
    EXPECT_EQ(3016, entry->GetAvailableRange(19400, kSize, &start));
    EXPECT_EQ(20480, start);
  }
  EXPECT_EQ(1523, entry->GetAvailableRange(3073, kSize, &start));
  EXPECT_EQ(3073, start);
  EXPECT_EQ(0, entry->GetAvailableRange(4600, kSize, &start));
  EXPECT_EQ(4600, start);

  // Now make another write and verify that there is no hole in between.
  EXPECT_EQ(kSize, entry->WriteSparseData(500 + kSize, buf1, kSize, NULL));
  EXPECT_EQ(7 * 1024 + 500, entry->GetAvailableRange(1024, 10000, &start));
  EXPECT_EQ(1024, start);
  EXPECT_EQ(kSize, entry->ReadSparseData(kSize, buf2, kSize, NULL));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));

  entry->Close();
}

TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
  InitCache();
  PartialSparseEntry();
}

TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  PartialSparseEntry();
}

TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));

  // Corrupt sparse children should be removed automatically.
  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf1 = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, entry->WriteSparseData(8192, buf1, kSize, NULL));
  EXPECT_EQ(kSize, entry->WriteSparseData(k1Meg + 8192, buf1, kSize, NULL));
  EXPECT_EQ(kSize, entry->WriteSparseData(2 * k1Meg + 8192, buf1, kSize, NULL));
  entry->Close();
  EXPECT_EQ(4, cache_->GetEntryCount());

  void* iter = NULL;
  int count = 0;
  std::string child_key[2];
  while (cache_->OpenNextEntry(&iter, &entry)) {
    ASSERT_TRUE(entry != NULL);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_key[count++] = entry->GetKey();
    entry->Close();
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_TRUE(cache_->OpenEntry(child_key[i], &entry));
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, entry->WriteData(2, 0, buf1, 12, NULL, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache_->GetEntryCount());
  ASSERT_TRUE(cache_->OpenEntry(key, &entry));

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, entry->ReadSparseData(2 * k1Meg + 8192, buf1, kSize, NULL));
  EXPECT_EQ(kSize, entry->WriteSparseData(k1Meg + 16384, buf1, kSize, NULL));
  EXPECT_EQ(0, entry->ReadSparseData(k1Meg + 8192, buf1, kSize, NULL));

  // We never touched this one.
  EXPECT_EQ(kSize, entry->ReadSparseData(8192, buf1, kSize, NULL));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, CancelSparseIO) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_TRUE(cache_->CreateEntry(key, &entry));

  const int kSize = 40 * 1024;
  scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  TestCompletionCallback cb1, cb2, cb3, cb4;
  int64 offset = 0;
  int tries = 0;
  const int maxtries = 100;  // Avoid hang on infinitely fast disks
  for (int ret = 0; ret != net::ERR_IO_PENDING; offset += kSize * 4) {
    ret = entry->WriteSparseData(offset, buf, kSize, &cb1);
    if (++tries > maxtries) {
      LOG(ERROR) << "Data writes never come back PENDING; skipping test";
      entry->Close();
      return;
    }
  }

  // Cannot use the entry at this point.
  offset = 0;
  EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
            entry->GetAvailableRange(offset, kSize, &offset));
  EXPECT_EQ(net::OK, entry->ReadyForSparseIO(&cb2));

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb2));
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb3));
  entry->CancelSparseIO();  // Should be a no op at this point.
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb4));

  offset = 0;
  EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
            entry->GetAvailableRange(offset, kSize, &offset));
  EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
            entry->ReadSparseData(offset, buf, kSize, NULL));
  EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
            entry->WriteSparseData(offset, buf, kSize, NULL));

  // Now see if we receive all notifications.
  EXPECT_EQ(kSize, cb1.GetResult(net::ERR_IO_PENDING));
  EXPECT_EQ(net::OK, cb2.GetResult(net::ERR_IO_PENDING));
  EXPECT_EQ(net::OK, cb3.GetResult(net::ERR_IO_PENDING));
  EXPECT_EQ(net::OK, cb4.GetResult(net::ERR_IO_PENDING));

  EXPECT_EQ(kSize, entry->GetAvailableRange(offset, kSize, &offset));
  EXPECT_EQ(net::OK, entry->ReadyForSparseIO(&cb2));
  entry->Close();
}