// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include <limits>
#include <memory>
#include <string>

#include "base/barrier_closure.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/functional/bind.h"
#include "base/hash/hash.h"
#include "base/memory/raw_ptr.h"
#include "base/process/process_metrics.h"
#include "base/rand_util.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/test/scoped_run_loop_timeout.h"
#include "base/test/test_file_util.h"
#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "base/timer/elapsed_timer.h"
#include "build/build_config.h"
#include "net/base/cache_type.h"
#include "net/base/completion_repeating_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/block_files.h"
#include "net/disk_cache/disk_cache.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/perf/perf_result_reporter.h"
#include "testing/platform_test.h"

using base::Time;

namespace {

const size_t kNumEntries = 10000;
const int kHeadersSize = 2000;

const int kBodySize = 72 * 1024 - 1;

// HttpCache likes this chunk size.
const int kChunkSize = 32 * 1024;

// As of 2017-01-12, this is a typical per-tab limit on HTTP connections.
const int kMaxParallelOperations = 10;

static constexpr char kMetricPrefixDiskCache[] = "DiskCache.";
static constexpr char kMetricPrefixSimpleIndex[] = "SimpleIndex.";
static constexpr char kMetricCacheEntriesWriteTimeMs[] =
    "cache_entries_write_time";
static constexpr char kMetricCacheHeadersReadTimeColdMs[] =
    "cache_headers_read_time_cold";
static constexpr char kMetricCacheHeadersReadTimeWarmMs[] =
    "cache_headers_read_time_warm";
static constexpr char kMetricCacheEntriesReadTimeColdMs[] =
    "cache_entries_read_time_cold";
static constexpr char kMetricCacheEntriesReadTimeWarmMs[] =
    "cache_entries_read_time_warm";
static constexpr char kMetricCacheKeysHashTimeMs[] = "cache_keys_hash_time";
static constexpr char kMetricFillBlocksTimeMs[] = "fill_sequential_blocks_time";
static constexpr char kMetricCreateDeleteBlocksTimeMs[] =
    "create_and_delete_random_blocks_time";
static constexpr char kMetricSimpleCacheInitTotalTimeMs[] =
    "simple_cache_initial_read_total_time";
static constexpr char kMetricSimpleCacheInitPerEntryTimeUs[] =
    "simple_cache_initial_read_per_entry_time";
static constexpr char kMetricAverageEvictionTimeMs[] = "average_eviction_time";

perf_test::PerfResultReporter SetUpDiskCacheReporter(const std::string& story) {
  perf_test::PerfResultReporter reporter(kMetricPrefixDiskCache, story);
  reporter.RegisterImportantMetric(kMetricCacheEntriesWriteTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheHeadersReadTimeColdMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheHeadersReadTimeWarmMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheEntriesReadTimeColdMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheEntriesReadTimeWarmMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheKeysHashTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricFillBlocksTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricCreateDeleteBlocksTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricSimpleCacheInitTotalTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricSimpleCacheInitPerEntryTimeUs, "us");
  return reporter;
}

perf_test::PerfResultReporter SetUpSimpleIndexReporter(
    const std::string& story) {
  perf_test::PerfResultReporter reporter(kMetricPrefixSimpleIndex, story);
  reporter.RegisterImportantMetric(kMetricAverageEvictionTimeMs, "ms");
  return reporter;
}

void MaybeIncreaseFdLimitTo(unsigned int max_descriptors) {
#if BUILDFLAG(IS_POSIX)
  base::IncreaseFdLimitTo(max_descriptors);
#endif
}

struct TestEntry {
  std::string key;
  int data_len;
};

enum class WhatToRead {
  HEADERS_ONLY,
  HEADERS_AND_BODY,
};

class DiskCachePerfTest : public DiskCacheTestWithCache {
 public:
  DiskCachePerfTest() { MaybeIncreaseFdLimitTo(kFdLimitForCacheTests); }

  const std::vector<TestEntry>& entries() const { return entries_; }

 protected:
  // Helper methods for constructing tests.
  bool TimeWrites(const std::string& story);
  bool TimeReads(WhatToRead what_to_read,
                 const std::string& metric,
                 const std::string& story);
  void ResetAndEvictSystemDiskCache();

  // Callbacks used within tests for intermediate operations.
  void WriteCallback(net::CompletionOnceCallback final_callback,
                     scoped_refptr<net::IOBuffer> headers_buffer,
                     scoped_refptr<net::IOBuffer> body_buffer,
                     disk_cache::Entry* cache_entry,
                     int entry_index,
                     size_t write_offset,
                     int result);

  // Complete perf tests.
  void CacheBackendPerformance(const std::string& story);

  const size_t kFdLimitForCacheTests = 8192;

  std::vector<TestEntry> entries_;
};

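// Writes kNumEntries entries to the cache, filling each with random headers
// and body data, while keeping up to kMaxParallelOperations create/write
// chains in flight at once.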
class WriteHandler {
 public:
  WriteHandler(const DiskCachePerfTest* test,
               disk_cache::Backend* cache,
               net::CompletionOnceCallback final_callback)
      : test_(test), cache_(cache), final_callback_(std::move(final_callback)) {
    CacheTestFillBuffer(headers_buffer_->span(), false);
    CacheTestFillBuffer(body_buffer_->span(), false);
  }

  void Run();

 protected:
  void CreateNextEntry();

  void CreateCallback(int data_len, disk_cache::EntryResult result);
  void WriteDataCallback(disk_cache::Entry* entry,
                         int next_offset,
                         int data_len,
                         int expected_result,
                         int result);

 private:
  bool CheckForErrorAndCancel(int result);

  raw_ptr<const DiskCachePerfTest> test_;
  raw_ptr<disk_cache::Backend> cache_;
  net::CompletionOnceCallback final_callback_;

  size_t next_entry_index_ = 0;
  size_t pending_operations_count_ = 0;

  int pending_result_ = net::OK;

  scoped_refptr<net::IOBuffer> headers_buffer_ =
      base::MakeRefCounted<net::IOBufferWithSize>(kHeadersSize);
  scoped_refptr<net::IOBuffer> body_buffer_ =
      base::MakeRefCounted<net::IOBufferWithSize>(kChunkSize);
};

void WriteHandler::Run() {
  for (int i = 0; i < kMaxParallelOperations; ++i) {
    ++pending_operations_count_;
    CreateNextEntry();
  }
}

void WriteHandler::CreateNextEntry() {
  ASSERT_GT(kNumEntries, next_entry_index_);
  TestEntry test_entry = test_->entries()[next_entry_index_++];
  auto callback =
      base::BindRepeating(&WriteHandler::CreateCallback, base::Unretained(this),
                          test_entry.data_len);
  disk_cache::EntryResult result =
      cache_->CreateEntry(test_entry.key, net::HIGHEST, callback);
  if (result.net_error() != net::ERR_IO_PENDING)
    callback.Run(std::move(result));
}

void WriteHandler::CreateCallback(int data_len,
                                  disk_cache::EntryResult result) {
  if (CheckForErrorAndCancel(result.net_error()))
    return;

  disk_cache::Entry* entry = result.ReleaseEntry();
  net::CompletionRepeatingCallback callback = base::BindRepeating(
      &WriteHandler::WriteDataCallback, base::Unretained(this), entry, 0,
      data_len, kHeadersSize);
  int new_result = entry->WriteData(0, 0, headers_buffer_.get(), kHeadersSize,
                                    callback, false);
  if (new_result != net::ERR_IO_PENDING)
    callback.Run(new_result);
}

void WriteHandler::WriteDataCallback(disk_cache::Entry* entry,
                                     int next_offset,
                                     int data_len,
                                     int expected_result,
                                     int result) {
  if (CheckForErrorAndCancel(result)) {
    entry->Close();
    return;
  }
  DCHECK_LE(next_offset, data_len);
  if (next_offset == data_len) {
    entry->Close();
    if (next_entry_index_ < kNumEntries) {
      CreateNextEntry();
    } else {
      --pending_operations_count_;
      if (pending_operations_count_ == 0)
        std::move(final_callback_).Run(net::OK);
    }
    return;
  }

  int write_size = std::min(kChunkSize, data_len - next_offset);
  net::CompletionRepeatingCallback callback = base::BindRepeating(
      &WriteHandler::WriteDataCallback, base::Unretained(this), entry,
      next_offset + write_size, data_len, write_size);
  int new_result = entry->WriteData(1, next_offset, body_buffer_.get(),
                                    write_size, callback, true);
  if (new_result != net::ERR_IO_PENDING)
    callback.Run(new_result);
}

bool WriteHandler::CheckForErrorAndCancel(int result) {
  DCHECK_NE(net::ERR_IO_PENDING, result);
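  // Completion values are either a negative net error code or a non-negative
  // byte count, so anything other than net::OK or a positive count is a
  // failure.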
  if (result != net::OK && !(result > 0))
    pending_result_ = result;
  if (pending_result_ != net::OK) {
    --pending_operations_count_;
    if (pending_operations_count_ == 0)
      std::move(final_callback_).Run(pending_result_);
    return true;
  }
  return false;
}

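// Counterpart to WriteHandler: re-opens the entries recorded by the test and
// reads the headers back (plus, for HEADERS_AND_BODY, the body in kChunkSize
// pieces), again with up to kMaxParallelOperations operations in flight.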
class ReadHandler {
 public:
  ReadHandler(const DiskCachePerfTest* test,
              WhatToRead what_to_read,
              disk_cache::Backend* cache,
              net::CompletionOnceCallback final_callback)
      : test_(test),
        what_to_read_(what_to_read),
        cache_(cache),
        final_callback_(std::move(final_callback)) {
    for (auto& read_buffer : read_buffers_) {
      read_buffer = base::MakeRefCounted<net::IOBufferWithSize>(
          std::max(kHeadersSize, kChunkSize));
    }
  }

  void Run();

 protected:
  void OpenNextEntry(int parallel_operation_index);

  void OpenCallback(int parallel_operation_index,
                    int data_len,
                    disk_cache::EntryResult result);
  void ReadDataCallback(int parallel_operation_index,
                        disk_cache::Entry* entry,
                        int next_offset,
                        int data_len,
                        int expected_result,
                        int result);

 private:
  bool CheckForErrorAndCancel(int result);

  raw_ptr<const DiskCachePerfTest> test_;
  const WhatToRead what_to_read_;

  raw_ptr<disk_cache::Backend> cache_;
  net::CompletionOnceCallback final_callback_;

  size_t next_entry_index_ = 0;
  size_t pending_operations_count_ = 0;

  int pending_result_ = net::OK;

  scoped_refptr<net::IOBuffer> read_buffers_[kMaxParallelOperations];
};

void ReadHandler::Run() {
  for (int i = 0; i < kMaxParallelOperations; ++i) {
    OpenNextEntry(pending_operations_count_);
    ++pending_operations_count_;
  }
}

void ReadHandler::OpenNextEntry(int parallel_operation_index) {
  ASSERT_GT(kNumEntries, next_entry_index_);
  TestEntry test_entry = test_->entries()[next_entry_index_++];
  auto callback =
      base::BindRepeating(&ReadHandler::OpenCallback, base::Unretained(this),
                          parallel_operation_index, test_entry.data_len);
  disk_cache::EntryResult result =
      cache_->OpenEntry(test_entry.key, net::HIGHEST, callback);
  if (result.net_error() != net::ERR_IO_PENDING)
    callback.Run(std::move(result));
}

void ReadHandler::OpenCallback(int parallel_operation_index,
                               int data_len,
                               disk_cache::EntryResult result) {
  if (CheckForErrorAndCancel(result.net_error()))
    return;

  disk_cache::Entry* entry = result.ReleaseEntry();

  EXPECT_EQ(data_len, entry->GetDataSize(1));

  net::CompletionRepeatingCallback callback = base::BindRepeating(
      &ReadHandler::ReadDataCallback, base::Unretained(this),
      parallel_operation_index, entry, 0, data_len, kHeadersSize);
  int new_result =
      entry->ReadData(0, 0, read_buffers_[parallel_operation_index].get(),
                      kChunkSize, callback);
  if (new_result != net::ERR_IO_PENDING)
    callback.Run(new_result);
}

void ReadHandler::ReadDataCallback(int parallel_operation_index,
                                   disk_cache::Entry* entry,
                                   int next_offset,
                                   int data_len,
                                   int expected_result,
                                   int result) {
  if (CheckForErrorAndCancel(result)) {
    entry->Close();
    return;
  }
  DCHECK_LE(next_offset, data_len);
  if (what_to_read_ == WhatToRead::HEADERS_ONLY || next_offset == data_len) {
    entry->Close();
    if (next_entry_index_ < kNumEntries) {
      OpenNextEntry(parallel_operation_index);
    } else {
      --pending_operations_count_;
      if (pending_operations_count_ == 0)
        std::move(final_callback_).Run(net::OK);
    }
    return;
  }

  int expected_read_size = std::min(kChunkSize, data_len - next_offset);
  net::CompletionRepeatingCallback callback = base::BindRepeating(
      &ReadHandler::ReadDataCallback, base::Unretained(this),
      parallel_operation_index, entry, next_offset + expected_read_size,
      data_len, expected_read_size);
  int new_result = entry->ReadData(
      1, next_offset, read_buffers_[parallel_operation_index].get(), kChunkSize,
      callback);
  if (new_result != net::ERR_IO_PENDING)
    callback.Run(new_result);
}

bool ReadHandler::CheckForErrorAndCancel(int result) {
  DCHECK_NE(net::ERR_IO_PENDING, result);
  if (result != net::OK && !(result > 0))
    pending_result_ = result;
  if (pending_result_ != net::OK) {
    --pending_operations_count_;
    if (pending_operations_count_ == 0)
      std::move(final_callback_).Run(pending_result_);
    return true;
  }
  return false;
}

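// Generates kNumEntries random keys and body sizes, then times one full
// parallel write pass over them.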
bool DiskCachePerfTest::TimeWrites(const std::string& story) {
  for (size_t i = 0; i < kNumEntries; i++) {
    TestEntry entry;
    entry.key = GenerateKey(true);
    entry.data_len = base::RandInt(0, kBodySize);
    entries_.push_back(entry);
  }

  net::TestCompletionCallback cb;

  auto reporter = SetUpDiskCacheReporter(story);
  base::ElapsedTimer write_timer;

  WriteHandler write_handler(this, cache_.get(), cb.callback());
  write_handler.Run();
  auto result = cb.WaitForResult();
  reporter.AddResult(kMetricCacheEntriesWriteTimeMs,
                     write_timer.Elapsed().InMillisecondsF());
  return result == net::OK;
}

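// Times one full parallel read pass over the previously written entries and
// reports the elapsed time under |metric|.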
bool DiskCachePerfTest::TimeReads(WhatToRead what_to_read,
                                  const std::string& metric,
                                  const std::string& story) {
  auto reporter = SetUpDiskCacheReporter(story);
  base::ElapsedTimer timer;

  net::TestCompletionCallback cb;
  ReadHandler read_handler(this, what_to_read, cache_.get(), cb.callback());
  read_handler.Run();
  auto result = cb.WaitForResult();
  reporter.AddResult(metric, timer.Elapsed().InMillisecondsF());
  return result == net::OK;
}

TEST_F(DiskCachePerfTest, BlockfileHashes) {
  auto reporter = SetUpDiskCacheReporter("baseline_story");
  base::ElapsedTimer timer;
  for (int i = 0; i < 300000; i++) {
    std::string key = GenerateKey(true);
    // TODO(dcheng): It's unclear if this is sufficient to keep a sufficiently
    // smart optimizer from simply discarding the function call if it realizes
    // there are no side effects.
    base::PersistentHash(key);
  }
  reporter.AddResult(kMetricCacheKeysHashTimeMs,
                     timer.Elapsed().InMillisecondsF());
}

void DiskCachePerfTest::ResetAndEvictSystemDiskCache() {
  base::RunLoop().RunUntilIdle();
  cache_.reset();

  // Flush all files in the cache out of system memory.
  const base::FilePath::StringType file_pattern = FILE_PATH_LITERAL("*");
  base::FileEnumerator enumerator(cache_path_, true /* recursive */,
                                  base::FileEnumerator::FILES, file_pattern);
  for (base::FilePath file_path = enumerator.Next(); !file_path.empty();
       file_path = enumerator.Next()) {
    ASSERT_TRUE(base::EvictFileFromSystemCache(file_path));
  }
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
  // And, cache directories, on platforms where the eviction utility supports
  // this (currently Linux, ChromeOS, and Android).
  if (simple_cache_mode_) {
    ASSERT_TRUE(
        base::EvictFileFromSystemCache(cache_path_.AppendASCII("index-dir")));
  }
  ASSERT_TRUE(base::EvictFileFromSystemCache(cache_path_));
#endif

  DisableFirstCleanup();
  InitCache();
}

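// End-to-end benchmark: writes all entries, then times cold reads (after
// evicting the cache files from OS memory) and warm reads, first for headers
// only and then for headers plus bodies.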
void DiskCachePerfTest::CacheBackendPerformance(const std::string& story) {
  base::test::ScopedRunLoopTimeout default_timeout(
      FROM_HERE, TestTimeouts::action_max_timeout());
  LOG(ERROR) << "Using cache at: " << cache_path_.MaybeAsASCII();
  SetMaxSize(500 * 1024 * 1024);
  InitCache();
  EXPECT_TRUE(TimeWrites(story));

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  ResetAndEvictSystemDiskCache();
  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_ONLY,
                        kMetricCacheHeadersReadTimeColdMs, story));
  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_ONLY,
                        kMetricCacheHeadersReadTimeWarmMs, story));

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  ResetAndEvictSystemDiskCache();
  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_AND_BODY,
                        kMetricCacheEntriesReadTimeColdMs, story));
  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_AND_BODY,
                        kMetricCacheEntriesReadTimeWarmMs, story));

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
}

#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/41393579): Fix this test on Fuchsia and re-enable.
#define MAYBE_CacheBackendPerformance DISABLED_CacheBackendPerformance
#else
#define MAYBE_CacheBackendPerformance CacheBackendPerformance
#endif
TEST_F(DiskCachePerfTest, MAYBE_CacheBackendPerformance) {
  CacheBackendPerformance("blockfile_cache");
}

#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/41393579): Fix this test on Fuchsia and re-enable.
#define MAYBE_SimpleCacheBackendPerformance \
  DISABLED_SimpleCacheBackendPerformance
#else
#define MAYBE_SimpleCacheBackendPerformance SimpleCacheBackendPerformance
#endif
TEST_F(DiskCachePerfTest, MAYBE_SimpleCacheBackendPerformance) {
  SetSimpleCacheMode();
  CacheBackendPerformance("simple_cache");
}

// Creating and deleting "entries" on a block-file is something quite frequent
// (after all, almost everything is stored on block files). The operation is
// almost free when the file is empty, but can be expensive if the file gets
// fragmented, or if we have multiple files. This test measures that scenario,
// by using multiple, highly fragmented files.
TEST_F(DiskCachePerfTest, BlockFilesPerformance) {
  ASSERT_TRUE(CleanupCacheDir());

  disk_cache::BlockFiles files(cache_path_);
  ASSERT_TRUE(files.Init(true));

  const int kNumBlocks = 60000;
  disk_cache::Addr address[kNumBlocks];

  auto reporter = SetUpDiskCacheReporter("blockfile_cache");
  base::ElapsedTimer sequential_timer;

  // Fill up the 32-byte block file (use three files).
  for (auto& addr : address) {
    int block_size = base::RandInt(1, 4);
    EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, block_size, &addr));
  }

  reporter.AddResult(kMetricFillBlocksTimeMs,
                     sequential_timer.Elapsed().InMillisecondsF());
  base::ElapsedTimer random_timer;

  for (int i = 0; i < 200000; i++) {
    int block_size = base::RandInt(1, 4);
    int entry = base::RandInt(0, kNumBlocks - 1);

    files.DeleteBlock(address[entry], false);
    EXPECT_TRUE(
        files.CreateBlock(disk_cache::RANKINGS, block_size, &address[entry]));
  }

  reporter.AddResult(kMetricCreateDeleteBlocksTimeMs,
                     random_timer.Elapsed().InMillisecondsF());
  base::RunLoop().RunUntilIdle();
}

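// Helper for the batched reads below: verifies the read result and signals
// the barrier closure.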
void VerifyRvAndCallClosure(base::RepeatingClosure* c, int expect_rv, int rv) {
  EXPECT_EQ(expect_rv, rv);
  c->Run();
}

TEST_F(DiskCachePerfTest, SimpleCacheInitialReadPortion) {
  // A benchmark that aims to measure how much time we take in I/O thread
  // for initial bookkeeping before returning to the caller, and how much
  // after (batched up some). The later portion includes some event loop
  // overhead.
  const int kBatchSize = 100;

  SetSimpleCacheMode();

  InitCache();
  // Write out the entries, and keep their objects around.
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kHeadersSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kBodySize);

  CacheTestFillBuffer(buffer1->span(), false);
  CacheTestFillBuffer(buffer2->span(), false);

  disk_cache::Entry* cache_entry[kBatchSize];
  for (int i = 0; i < kBatchSize; ++i) {
    TestEntryResultCompletionCallback cb_create;
    disk_cache::EntryResult result = cb_create.GetResult(cache_->CreateEntry(
        base::NumberToString(i), net::HIGHEST, cb_create.callback()));
    ASSERT_EQ(net::OK, result.net_error());
    cache_entry[i] = result.ReleaseEntry();

    net::TestCompletionCallback cb;
    int rv = cache_entry[i]->WriteData(0, 0, buffer1.get(), kHeadersSize,
                                       cb.callback(), false);
    ASSERT_EQ(kHeadersSize, cb.GetResult(rv));
    rv = cache_entry[i]->WriteData(1, 0, buffer2.get(), kBodySize,
                                   cb.callback(), false);
    ASSERT_EQ(kBodySize, cb.GetResult(rv));
  }

  // Now repeatedly read these, batching up the waiting to try to
  // account for the two portions separately. Note that we need separate
  // entries since we are trying to keep interesting work from being on the
  // delayed-done portion.
  const int kIterations = 50000;

  double elapsed_early = 0.0;
  double elapsed_late = 0.0;

  for (int i = 0; i < kIterations; ++i) {
    base::RunLoop event_loop;
    base::RepeatingClosure barrier =
        base::BarrierClosure(kBatchSize, event_loop.QuitWhenIdleClosure());
    net::CompletionRepeatingCallback cb_batch(base::BindRepeating(
        VerifyRvAndCallClosure, base::Unretained(&barrier), kHeadersSize));

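    // Time the synchronous portion: issuing the batch of reads and whatever
    // bookkeeping happens before ReadData returns.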
    base::ElapsedTimer timer_early;
    for (auto* entry : cache_entry) {
      int rv = entry->ReadData(0, 0, buffer1.get(), kHeadersSize, cb_batch);
      if (rv != net::ERR_IO_PENDING) {
        barrier.Run();
        ASSERT_EQ(kHeadersSize, rv);
      }
    }
    elapsed_early += timer_early.Elapsed().InMillisecondsF();

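    // Time the asynchronous portion: waiting for the batched completions to
    // arrive via the event loop.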
    base::ElapsedTimer timer_late;
    event_loop.Run();
    elapsed_late += timer_late.Elapsed().InMillisecondsF();
  }

  // Cleanup
  for (auto* entry : cache_entry)
    entry->Close();

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
  auto reporter = SetUpDiskCacheReporter("early_portion");
  reporter.AddResult(kMetricSimpleCacheInitTotalTimeMs, elapsed_early);
  reporter.AddResult(kMetricSimpleCacheInitPerEntryTimeUs,
                     1000 * (elapsed_early / (kIterations * kBatchSize)));
  reporter = SetUpDiskCacheReporter("event_loop_portion");
  reporter.AddResult(kMetricSimpleCacheInitTotalTimeMs, elapsed_late);
  reporter.AddResult(kMetricSimpleCacheInitPerEntryTimeUs,
                     1000 * (elapsed_late / (kIterations * kBatchSize)));
}

#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/40222788): Fix this test on Fuchsia and re-enable.
#define MAYBE_EvictionPerformance DISABLED_EvictionPerformance
#else
#define MAYBE_EvictionPerformance EvictionPerformance
#endif
// Measures how quickly SimpleIndex can compute which entries to evict.
TEST(SimpleIndexPerfTest, MAYBE_EvictionPerformance) {
  const int kEntries = 10000;

  class NoOpDelegate : public disk_cache::SimpleIndexDelegate {
    void DoomEntries(std::vector<uint64_t>* entry_hashes,
                     net::CompletionOnceCallback callback) override {}
  };

  NoOpDelegate delegate;
  base::Time start(base::Time::Now());

  double evict_elapsed_ms = 0;
  int iterations = 0;
  while (iterations < 61000) {
    ++iterations;
    disk_cache::SimpleIndex index(/* io_thread = */ nullptr,
                                  /* cleanup_tracker = */ nullptr, &delegate,
                                  net::DISK_CACHE,
                                  /* simple_index_file = */ nullptr);

    // Make sure large enough to not evict on insertion.
    index.SetMaxSize(kEntries * 2);

    for (int i = 0; i < kEntries; ++i) {
      index.InsertEntryForTesting(
          i, disk_cache::EntryMetadata(start + base::Seconds(i), 1u));
    }

    // Trigger an eviction.
    base::ElapsedTimer timer;
    index.SetMaxSize(kEntries);
    index.UpdateEntrySize(0, 1u);
    evict_elapsed_ms += timer.Elapsed().InMillisecondsF();
  }

  auto reporter = SetUpSimpleIndexReporter("baseline_story");
  reporter.AddResult(kMetricAverageEvictionTimeMs,
                     evict_elapsed_ms / iterations);
}

}  // namespace