// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <cinttypes>
#include <cstdlib>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "base/at_exit.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/functional/callback.h"
#include "base/logging.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/numerics/checked_math.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/task_environment.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
#include "net/base/interval.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/disk_cache.h"
#include "net/disk_cache/disk_cache_fuzzer.pb.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_file_tracker.h"
#include "net/disk_cache/simple/simple_index.h"
#include "testing/libfuzzer/proto/lpm_interface.h"

// To get a good idea of what a test case is doing, just run the libfuzzer
// target with LPM_DUMP_NATIVE_INPUT=1 prefixed. This will trigger all the
// prints below and will convey exactly what the test case is doing: use this
// instead of trying to print the protobuf as text.
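// For example (the binary path is illustrative; use your own build output
// directory and test case file):
//   LPM_DUMP_NATIVE_INPUT=1 out/Default/disk_cache_lpm_fuzzer <test_case>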

// For code coverage:
// python ./tools/code_coverage/coverage.py disk_cache_lpm_fuzzer -b
// out/coverage -o out/report -c 'out/coverage/disk_cache_lpm_fuzzer
// -runs=0 -workers=24 corpus_disk_cache_simple' -f net/disk_cache

void IOCallback(std::string io_type, int rv);

namespace {
const uint32_t kMaxSizeKB = 128;  // 128KB maximum.
const uint32_t kMaxSize = kMaxSizeKB * 1024;
const uint32_t kMaxEntrySize = kMaxSize * 2;
const uint32_t kNumStreams = 3;  // All caches seem to have 3 streams.
                                 // TODO: do other specialized caches have this?
const uint64_t kFirstSavedTime =
    5;  // Totally random number chosen by dice roll. ;)
const uint32_t kMaxNumMillisToWait = 2019;
const int kMaxFdsSimpleCache = 10;

// Known colliding key values taken from SimpleCacheCreateCollision unittest.
const std::string kCollidingKey1 =
    "\xfb\x4e\x9c\x1d\x66\x71\xf7\x54\xa3\x11\xa0\x7e\x16\xa5\x68\xf6";
const std::string kCollidingKey2 =
    "\xbc\x60\x64\x92\xbc\xa0\x5c\x15\x17\x93\x29\x2d\xe4\x21\xbd\x03";

#define IOTYPES_APPLY(F) \
  F(WriteData)           \
  F(ReadData)            \
  F(WriteSparseData)     \
  F(ReadSparseData)      \
  F(DoomAllEntries)      \
  F(DoomEntriesSince)    \
  F(DoomEntriesBetween)  \
  F(GetAvailableRange)   \
  F(DoomKey)

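// X-macro: IOTYPES_APPLY(F) applies F to every IO operation name, so the
// IOType enum below and the io_callbacks_ vector built in InitGlobals stay in
// one-to-one correspondence (looked up via static_cast<int>(IOType) in
// GetIOCallback()).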
enum class IOType {
#define ENUM_ENTRY(IO_TYPE) IO_TYPE,
  IOTYPES_APPLY(ENUM_ENTRY)
#undef ENUM_ENTRY
};

struct InitGlobals {
  InitGlobals() {
    base::CommandLine::Init(0, nullptr);

    print_comms_ = ::getenv("LPM_DUMP_NATIVE_INPUT");

    // TaskEnvironment requires TestTimeouts initialization to watch for
    // problematic long-running tasks.
    TestTimeouts::Initialize();

    // Mark this thread as an IO_THREAD with MOCK_TIME, and ensure that Now()
    // is driven from the same mock clock.
    task_environment_ = std::make_unique<base::test::TaskEnvironment>(
        base::test::TaskEnvironment::MainThreadType::IO,
        base::test::TaskEnvironment::TimeSource::MOCK_TIME);

    // Disable noisy logging as per "libFuzzer in Chrome" documentation:
    // testing/libfuzzer/getting_started.md#Disable-noisy-error-message-logging.
    logging::SetMinLogLevel(logging::LOG_FATAL);

    // Re-using this buffer for write operations may technically be against
    // IOBuffer rules but it shouldn't cause any actual problems.
    buffer_ =
        base::MakeRefCounted<net::IOBuffer>(static_cast<size_t>(kMaxEntrySize));
    CacheTestFillBuffer(buffer_->data(), kMaxEntrySize, false);

#define CREATE_IO_CALLBACK(IO_TYPE) \
  io_callbacks_.push_back(base::BindRepeating(&IOCallback, #IO_TYPE));
    IOTYPES_APPLY(CREATE_IO_CALLBACK)
#undef CREATE_IO_CALLBACK
  }

  // This allows us to mock time for all threads.
  std::unique_ptr<base::test::TaskEnvironment> task_environment_;

  // Used as a pre-filled buffer for all writes.
  scoped_refptr<net::IOBuffer> buffer_;

  // Should we print debugging info?
  bool print_comms_;

  // List of IO callbacks. They do nothing (except maybe print) but are used by
  // all async entry operations.
  std::vector<base::RepeatingCallback<void(int)>> io_callbacks_;
};

InitGlobals* init_globals = new InitGlobals();
}  // namespace

class DiskCacheLPMFuzzer {
 public:
  DiskCacheLPMFuzzer() {
    CHECK(temp_dir_.CreateUniqueTempDir());
    cache_path_ = temp_dir_.GetPath();
  }

  ~DiskCacheLPMFuzzer();

  void RunCommands(const disk_cache_fuzzer::FuzzCommands& commands);

 private:
  struct EntryInfo {
    EntryInfo() = default;

    EntryInfo(const EntryInfo&) = delete;
    EntryInfo& operator=(const EntryInfo&) = delete;

    // This field is not a raw_ptr<> because it was filtered by the rewriter
    // for: #addr-of, #constexpr-ctor-field-initializer
    RAW_PTR_EXCLUSION disk_cache::Entry* entry_ptr = nullptr;
    std::unique_ptr<TestEntryResultCompletionCallback> tcb;
  };
  void RunTaskForTest(base::OnceClosure closure);

  // Waits for an entry to be ready. Should only be called if there is a
  // pending callback for this entry, i.e. ei->tcb != nullptr.
  // Also takes the result that the cache entry creation functions return; if
  // result.net_error() != net::ERR_IO_PENDING the callback would never have
  // been called, so there is nothing to wait for.
  disk_cache::EntryResult WaitOnEntry(
      EntryInfo* ei,
      disk_cache::EntryResult result =
          disk_cache::EntryResult::MakeError(net::ERR_IO_PENDING));

  // Used as a callback for entry-opening backend calls. Will record the entry
  // in the map as usable and will release any entry-specific calls waiting for
  // the entry to be ready.
  void OpenCacheEntryCallback(uint64_t entry_id,
                              bool async,
                              bool set_is_sparse,
                              disk_cache::EntryResult result);

  // Waits for the entry to finish opening, in the async case. Then, if the
  // entry was opened successfully (the callback returned net::OK, or it was
  // already successfully opened before), checks whether entry_ptr == nullptr.
  // If so, the entry has since been closed.
  bool IsValidEntry(EntryInfo* ei);

  // Closes any non-nullptr entries in open_cache_entries_.
  void CloseAllRemainingEntries();

  void HandleSetMaxSize(const disk_cache_fuzzer::SetMaxSize&);
  void CreateBackend(
      disk_cache_fuzzer::FuzzCommands::CacheBackend cache_backend,
      uint32_t mask,
      net::CacheType type,
      bool simple_cache_wait_for_index);

  // Places to keep our cache files.
  base::FilePath cache_path_;
  base::ScopedTempDir temp_dir_;

  // Pointers to our backend. Only one of block_impl_, simple_cache_impl_, and
  // mem_cache_ is active at a time.
  std::unique_ptr<disk_cache::Backend> cache_;
  raw_ptr<disk_cache::BackendImpl> block_impl_ = nullptr;
  std::unique_ptr<disk_cache::SimpleFileTracker> simple_file_tracker_;
  raw_ptr<disk_cache::SimpleBackendImpl> simple_cache_impl_ = nullptr;
  raw_ptr<disk_cache::MemBackendImpl> mem_cache_ = nullptr;

  // Maximum size of the cache that we have currently set.
  uint32_t max_size_ = kMaxSize;

  // This "consistent hash table" keeps track of the keys we've added to the
  // backend so far. This should always be indexed by a "key_id" from a
  // protobuf.
  std::map<uint64_t, std::string> created_cache_entries_;
  // This "consistent hash table" keeps track of all opened entries we have
  // from the backend, and also contains some nullptrs where entries were
  // already closed. This should always be indexed by an "entry_id" from a
  // protobuf. When destructed, we close all entries that are still open in
  // order to avoid memory leaks.
  std::map<uint64_t, EntryInfo> open_cache_entries_;
  // This "consistent hash table" keeps track of all times we have saved, so
  // that we can call backend methods like DoomEntriesSince or
  // DoomEntriesBetween with sane timestamps. This should always be indexed by
  // a "time_id" from a protobuf.
  std::map<uint64_t, base::Time> saved_times_;
  // This "consistent hash table" keeps track of all the iterators we have
  // open from the backend. This should always be indexed by an "it_id" from a
  // protobuf.
  std::map<uint64_t, std::unique_ptr<disk_cache::Backend::Iterator>>
      open_iterators_;

  // This map keeps track of the sparsity of each entry, using their pointers.
  // TODO(mpdenton) remove if CreateEntry("Key0"); WriteData("Key0", index = 2,
  // ...); WriteSparseData("Key0", ...); is supposed to be valid.
  // Then we can just use CouldBeSparse before the WriteData.
  std::map<disk_cache::Entry*, bool> sparse_entry_tracker_;
};

#define MAYBE_PRINT               \
  if (init_globals->print_comms_) \
  std::cout
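// MAYBE_PRINT only prints when the LPM_DUMP_NATIVE_INPUT environment variable
// is set (see InitGlobals), e.g.
//   MAYBE_PRINT << "CreateBackend()" << std::endl;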

inline base::RepeatingCallback<void(int)> GetIOCallback(IOType iot) {
  return init_globals->io_callbacks_[static_cast<int>(iot)];
}

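// Maps a fuzzer-chosen key id to a cache key string. For example (values
// follow directly from the code below): ToKey(0) == "Key0", ToKey(2) ==
// "KeyAA2" (two bytes of 'A' padding), and key ids ending in 98 or 99 map to
// the known colliding keys defined above.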
std::string ToKey(uint64_t key_num) {
  // Use one of the two known colliding key values when the key id ends in 98
  // or 99 (about 2% of executions).
  if (key_num % 100 == 99)
    return kCollidingKey1;
  if (key_num % 100 == 98)
    return kCollidingKey2;

  // Otherwise, use a value based on the key id and fuzzy padding.
  std::string padding(key_num & 0xFFFF, 'A');
  return "Key" + padding + base::NumberToString(key_num);
}

net::RequestPriority GetRequestPriority(
    disk_cache_fuzzer::RequestPriority lpm_pri) {
  CHECK(net::MINIMUM_PRIORITY <= static_cast<int>(lpm_pri) &&
        static_cast<int>(lpm_pri) <= net::MAXIMUM_PRIORITY);
  return static_cast<net::RequestPriority>(lpm_pri);
}

net::CacheType GetCacheTypeAndPrint(
    disk_cache_fuzzer::FuzzCommands::CacheType type,
    disk_cache_fuzzer::FuzzCommands::CacheBackend backend) {
  switch (type) {
    case disk_cache_fuzzer::FuzzCommands::APP_CACHE:
      MAYBE_PRINT << "Cache type = APP_CACHE." << std::endl;
      return net::CacheType::APP_CACHE;
      break;
    case disk_cache_fuzzer::FuzzCommands::REMOVED_MEDIA_CACHE:
      // Media cache no longer in use; handle as HTTP_CACHE
      MAYBE_PRINT << "Cache type = REMOVED_MEDIA_CACHE." << std::endl;
      return net::CacheType::DISK_CACHE;
      break;
    case disk_cache_fuzzer::FuzzCommands::SHADER_CACHE:
      MAYBE_PRINT << "Cache type = SHADER_CACHE." << std::endl;
      return net::CacheType::SHADER_CACHE;
      break;
    case disk_cache_fuzzer::FuzzCommands::PNACL_CACHE:
      // Simple cache won't handle PNACL_CACHE.
      if (backend == disk_cache_fuzzer::FuzzCommands::SIMPLE) {
        MAYBE_PRINT << "Cache type = DISK_CACHE." << std::endl;
        return net::CacheType::DISK_CACHE;
      }
      MAYBE_PRINT << "Cache type = PNACL_CACHE." << std::endl;
      return net::CacheType::PNACL_CACHE;
      break;
    case disk_cache_fuzzer::FuzzCommands::GENERATED_BYTE_CODE_CACHE:
      MAYBE_PRINT << "Cache type = GENERATED_BYTE_CODE_CACHE." << std::endl;
      return net::CacheType::GENERATED_BYTE_CODE_CACHE;
      break;
    case disk_cache_fuzzer::FuzzCommands::GENERATED_NATIVE_CODE_CACHE:
      MAYBE_PRINT << "Cache type = GENERATED_NATIVE_CODE_CACHE." << std::endl;
      return net::CacheType::GENERATED_NATIVE_CODE_CACHE;
      break;
    case disk_cache_fuzzer::FuzzCommands::DISK_CACHE:
      MAYBE_PRINT << "Cache type = DISK_CACHE." << std::endl;
      return net::CacheType::DISK_CACHE;
      break;
  }
}

void IOCallback(std::string io_type, int rv) {
  MAYBE_PRINT << " [Async IO (" << io_type << ") = " << rv << "]" << std::endl;
}

/*
 * Consistent hashing inspired map for fuzzer state.
 * If we stored open cache entries in a hash table mapping cache_entry_id ->
 * disk_cache::Entry*, then it would be highly unlikely that any subsequent
 * "CloseEntry" or "WriteData" etc. command would come up with an ID that would
 * correspond to a valid entry in the hash table. The optimal solution is for
 * libfuzzer to generate CloseEntry commands with an ID that matches the ID of
 * a previous OpenEntry command. But libfuzzer is stateless and should stay
 * that way.
 *
 * On the other hand, if we stored entries in a vector, and on a CloseEntry
 * command we took the entry at CloseEntry.id % (size of entries vector), we
 * would always generate correct CloseEntries. This is good, but all
 * dumb/general minimization techniques stop working, because deleting a single
 * OpenEntry command changes the indexes of every entry in the vector from then
 * on.
 *
 * So, we use something that's more stable for minimization: consistent
 * hashing. Basically, when we see a CloseEntry.id, we take the entry in the
 * table that has the next highest id (wrapping when there is no higher entry).
 *
 * This makes us resilient to deleting irrelevant OpenEntry commands. But, if
 * we delete from the table on CloseEntry commands, we still screw up all the
 * indexes during minimization. We'll get around this by not deleting entries
 * after CloseEntry commands, but that will result in a slightly less efficient
 * fuzzer, as if there are many closed entries in the table, many of the *Entry
 * commands will be useless. It seems like a decent balance between generating
 * useful fuzz commands and effective minimization.
 */
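// For example, if open_cache_entries_ currently holds ids {3, 7}, then
// GetNextValue(&open_cache_entries_, 2) returns the entry with id 3,
// GetNextValue(&open_cache_entries_, 5) returns the entry with id 7, and
// GetNextValue(&open_cache_entries_, 9) wraps around and returns id 3.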
template <typename T>
typename std::map<uint64_t, T>::iterator GetNextValue(
    typename std::map<uint64_t, T>* entries,
    uint64_t val) {
  auto iter = entries->lower_bound(val);
  if (iter != entries->end())
    return iter;
  // Wrap to 0
  iter = entries->lower_bound(0);
  if (iter != entries->end())
    return iter;

  return entries->end();
}

void DiskCacheLPMFuzzer::RunTaskForTest(base::OnceClosure closure) {
  if (!block_impl_) {
    std::move(closure).Run();
    return;
  }

  net::TestCompletionCallback cb;
  int rv = block_impl_->RunTaskForTest(std::move(closure), cb.callback());
  CHECK_EQ(cb.GetResult(rv), net::OK);
}

// Resets the cb in the map so that WriteData and other calls that work on an
// entry don't wait for its result.
void DiskCacheLPMFuzzer::OpenCacheEntryCallback(
    uint64_t entry_id,
    bool async,
    bool set_is_sparse,
    disk_cache::EntryResult result) {
  // TODO(mpdenton) if this fails should we delete the entry entirely?
  // Would need to mark it for deletion and delete it later, as
  // IsValidEntry might be waiting for it.
  EntryInfo* ei = &open_cache_entries_[entry_id];

  if (async) {
    int rv = result.net_error();
    ei->entry_ptr = result.ReleaseEntry();
    // We are responsible for setting things up.
    if (set_is_sparse && ei->entry_ptr) {
      sparse_entry_tracker_[ei->entry_ptr] = true;
    }
    if (ei->entry_ptr) {
      MAYBE_PRINT << " [Async opening of cache entry for \""
                  << ei->entry_ptr->GetKey() << "\" callback (rv = " << rv
                  << ")]" << std::endl;
    }
    // Unblock any subsequent ops waiting for this --- they don't care about
    // the actual return value, but use something distinctive for debugging.
    ei->tcb->callback().Run(
        disk_cache::EntryResult::MakeError(net::ERR_FILE_VIRUS_INFECTED));
  } else {
    // The operation code will pull the result out of the completion callback,
    // so hand it to it.
    ei->tcb->callback().Run(std::move(result));
  }
}

disk_cache::EntryResult DiskCacheLPMFuzzer::WaitOnEntry(
    EntryInfo* ei,
    disk_cache::EntryResult result) {
  CHECK(ei->tcb);
  result = ei->tcb->GetResult(std::move(result));

  // Reset the callback so nobody accidentally waits on a callback that never
  // comes.
  ei->tcb.reset();
  return result;
}

bool DiskCacheLPMFuzzer::IsValidEntry(EntryInfo* ei) {
  if (ei->tcb) {
    // If we have a callback, we are the first to access this async-created
    // entry. Wait for it, and then delete it so nobody waits on it again.
    WaitOnEntry(ei);
  }
  // entry_ptr will be nullptr if the entry has been closed.
  return ei->entry_ptr != nullptr;
}

/*
 * Async implementation:
 1. RunUntilIdle at the top of the loop to handle any callbacks we've been
 posted from the backend thread.
 2. Only the entry creation functions have important callbacks. The good thing
 is backend destruction will cancel these operations. The entry creation
 functions simply need to keep the entry_ptr* alive until the callback is
 posted, and then need to make sure the entry_ptr is added to the map in order
 to Close it in the destructor.
 As for iterators, it's unclear whether closing an iterator will cancel
 callbacks.

 Problem: WriteData (and similar) calls will fail on the entry_id until the
 callback happens. So, we should probably delay these calls or we will have
 very unreliable test cases. These are the options:
 1. Queue up WriteData (etc.) calls in some map, such that when the OpenEntry
 callback runs, the WriteData calls will all run.
 2. Just sit there and wait for the entry to be ready.

 #2 is probably best as it doesn't prevent any interesting cases and is much
 simpler.
*/

void DiskCacheLPMFuzzer::RunCommands(
    const disk_cache_fuzzer::FuzzCommands& commands) {
  // Skip too long command sequences, they are counterproductive for fuzzing.
  // The number was chosen empirically using the existing fuzzing corpus.
  if (commands.fuzz_commands_size() > 129)
    return;

  uint32_t mask =
      commands.has_set_mask() ? (commands.set_mask() ? 0x1 : 0xf) : 0;
  net::CacheType type =
      GetCacheTypeAndPrint(commands.cache_type(), commands.cache_backend());
  CreateBackend(commands.cache_backend(), mask, type,
                commands.simple_cache_wait_for_index());
  MAYBE_PRINT << "CreateBackend()" << std::endl;

  if (commands.has_set_max_size()) {
    HandleSetMaxSize(commands.set_max_size());
  }

  {
    base::Time curr_time = base::Time::Now();
    saved_times_[kFirstSavedTime] = curr_time;
    // MAYBE_PRINT << "Saved initial time " << curr_time << std::endl;
  }

  for (const disk_cache_fuzzer::FuzzCommand& command :
       commands.fuzz_commands()) {
    // Handle any callbacks that other threads may have posted to us in the
    // meantime, so any successful async OpenEntry's (etc.) add their
    // entry_ptr's to the map.
    init_globals->task_environment_->RunUntilIdle();

    switch (command.fuzz_command_oneof_case()) {
      case disk_cache_fuzzer::FuzzCommand::kSetMaxSize: {
        HandleSetMaxSize(command.set_max_size());
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kCreateEntry: {
        if (!cache_)
          continue;

        const disk_cache_fuzzer::CreateEntry& ce = command.create_entry();
        uint64_t key_id = ce.key_id();
        uint64_t entry_id = ce.entry_id();
        net::RequestPriority pri = GetRequestPriority(ce.pri());
        bool async = ce.async();
        bool is_sparse = ce.is_sparse();

        if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
          continue;  // Don't overwrite a currently open cache entry.

        std::string key_str = ToKey(key_id);
        created_cache_entries_[key_id] = key_str;

        EntryInfo* entry_info = &open_cache_entries_[entry_id];

        entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
        disk_cache::EntryResultCallback cb =
            base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
                           base::Unretained(this), entry_id, async, is_sparse);

        MAYBE_PRINT << "CreateEntry(\"" << key_str
                    << "\", set_is_sparse = " << is_sparse
                    << ") = " << std::flush;
        disk_cache::EntryResult result =
            cache_->CreateEntry(key_str, pri, std::move(cb));
        if (!async || result.net_error() != net::ERR_IO_PENDING) {
          result = WaitOnEntry(entry_info, std::move(result));
          int rv = result.net_error();

          // Ensure we mark sparsity and save the entry even if the callback
          // never ran.
          if (rv == net::OK) {
            entry_info->entry_ptr = result.ReleaseEntry();
            sparse_entry_tracker_[entry_info->entry_ptr] = is_sparse;
          }
          MAYBE_PRINT << rv << std::endl;
        } else {
          MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kOpenEntry: {
        if (!cache_)
          continue;

        const disk_cache_fuzzer::OpenEntry& oe = command.open_entry();
        uint64_t key_id = oe.key_id();
        uint64_t entry_id = oe.entry_id();
        net::RequestPriority pri = GetRequestPriority(oe.pri());
        bool async = oe.async();

        if (created_cache_entries_.empty())
          continue;

        if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
          continue;  // Don't overwrite a currently open cache entry.

        EntryInfo* entry_info = &open_cache_entries_[entry_id];

        entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
        disk_cache::EntryResultCallback cb =
            base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
                           base::Unretained(this), entry_id, async, false);

        auto key_it = GetNextValue(&created_cache_entries_, key_id);
        MAYBE_PRINT << "OpenEntry(\"" << key_it->second
                    << "\") = " << std::flush;
        disk_cache::EntryResult result =
            cache_->OpenEntry(key_it->second, pri, std::move(cb));
        if (!async || result.net_error() != net::ERR_IO_PENDING) {
          result = WaitOnEntry(entry_info, std::move(result));
          int rv = result.net_error();
          if (rv == net::OK)
            entry_info->entry_ptr = result.ReleaseEntry();
          MAYBE_PRINT << rv << std::endl;
        } else {
          MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kOpenOrCreateEntry: {
        if (!cache_)
          continue;

        const disk_cache_fuzzer::OpenOrCreateEntry& ooce =
            command.open_or_create_entry();
        uint64_t key_id = ooce.key_id();
        uint64_t entry_id = ooce.entry_id();
        net::RequestPriority pri = GetRequestPriority(ooce.pri());
        bool async = ooce.async();
        bool is_sparse = ooce.is_sparse();

        if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
          continue;  // Don't overwrite a currently open cache entry.

        std::string key_str;
        // If our proto tells us to create a new entry, do so, just using
        // OpenOrCreateEntry instead of CreateEntry.
        if (ooce.create_new()) {
          // Use a possibly new key.
          key_str = ToKey(key_id);
          created_cache_entries_[key_id] = key_str;
        } else {
          if (created_cache_entries_.empty())
            continue;
          auto key_it = GetNextValue(&created_cache_entries_, key_id);
          key_str = key_it->second;
        }

        // Setup for callbacks.

        EntryInfo* entry_info = &open_cache_entries_[entry_id];

        entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
        disk_cache::EntryResultCallback cb =
            base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
                           base::Unretained(this), entry_id, async, is_sparse);

        // Will only be set as sparse if it is created and not opened.
        MAYBE_PRINT << "OpenOrCreateEntry(\"" << key_str
                    << "\", set_is_sparse = " << is_sparse
                    << ") = " << std::flush;
        disk_cache::EntryResult result =
            cache_->OpenOrCreateEntry(key_str, pri, std::move(cb));
        if (!async || result.net_error() != net::ERR_IO_PENDING) {
          result = WaitOnEntry(entry_info, std::move(result));
          int rv = result.net_error();
          bool opened = result.opened();
          entry_info->entry_ptr = result.ReleaseEntry();
          // Ensure we mark sparsity, even if the callback never ran.
          if (rv == net::OK && !opened)
            sparse_entry_tracker_[entry_info->entry_ptr] = is_sparse;
          MAYBE_PRINT << rv << ", opened = " << opened << std::endl;
        } else {
          MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kCloseEntry: {
        if (open_cache_entries_.empty())
          continue;

        auto entry_it = GetNextValue(&open_cache_entries_,
                                     command.close_entry().entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        MAYBE_PRINT << "CloseEntry(\"" << entry_it->second.entry_ptr->GetKey()
                    << "\")" << std::endl;
        entry_it->second.entry_ptr->Close();

        // Set the entry_ptr to nullptr to ensure no one uses it anymore.
        entry_it->second.entry_ptr = nullptr;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomEntry: {
        if (open_cache_entries_.empty())
          continue;

        auto entry_it =
            GetNextValue(&open_cache_entries_, command.doom_entry().entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        MAYBE_PRINT << "DoomEntry(\"" << entry_it->second.entry_ptr->GetKey()
                    << "\")" << std::endl;
        entry_it->second.entry_ptr->Doom();
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kWriteData: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::WriteData& wd = command.write_data();
        auto entry_it = GetNextValue(&open_cache_entries_, wd.entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        int index = 0;  // If the entry is sparse, non-sparse-aware operations
                        // must use stream 0 according to the spec.
                        // Implementations might have weaker constraints.
        if (!sparse_entry_tracker_[entry_it->second.entry_ptr])
          index = wd.index() % kNumStreams;
        uint32_t offset = wd.offset() % kMaxEntrySize;
        size_t size = wd.size() % kMaxEntrySize;
        bool async = wd.async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::WriteData);

        MAYBE_PRINT << "WriteData(\"" << entry_it->second.entry_ptr->GetKey()
                    << "\", index = " << index << ", offset = " << offset
                    << ", size = " << size << ", truncate = " << wd.truncate()
                    << ")" << std::flush;
        int rv = entry_it->second.entry_ptr->WriteData(
            index, offset, init_globals->buffer_.get(), size, std::move(cb),
            wd.truncate());
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kReadData: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::ReadData& wd = command.read_data();
        auto entry_it = GetNextValue(&open_cache_entries_, wd.entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        int index = 0;  // If the entry is sparse, non-sparse-aware operations
                        // must use stream 0 according to the spec.
                        // Implementations might have weaker constraints.
        if (!sparse_entry_tracker_[entry_it->second.entry_ptr])
          index = wd.index() % kNumStreams;
        uint32_t offset = wd.offset() % kMaxEntrySize;
        size_t size = wd.size() % kMaxEntrySize;
        bool async = wd.async();
        scoped_refptr<net::IOBuffer> buffer =
            base::MakeRefCounted<net::IOBuffer>(size);

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::ReadData);

        MAYBE_PRINT << "ReadData(\"" << entry_it->second.entry_ptr->GetKey()
                    << "\", index = " << index << ", offset = " << offset
                    << ", size = " << size << ")" << std::flush;
        int rv = entry_it->second.entry_ptr->ReadData(
            index, offset, buffer.get(), size, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kWriteSparseData: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::WriteSparseData& wsd =
            command.write_sparse_data();
        auto entry_it = GetNextValue(&open_cache_entries_, wsd.entry_id());
        if (!IsValidEntry(&entry_it->second) ||
            !sparse_entry_tracker_[entry_it->second.entry_ptr])
          continue;

        uint64_t offset = wsd.offset();
        if (wsd.cap_offset())
          offset %= kMaxEntrySize;
        size_t size = wsd.size() % kMaxEntrySize;
        bool async = wsd.async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::WriteSparseData);
        MAYBE_PRINT << "WriteSparseData(\""
                    << entry_it->second.entry_ptr->GetKey()
                    << "\", offset = " << offset << ", size = " << size << ")"
                    << std::flush;
        int rv = entry_it->second.entry_ptr->WriteSparseData(
            offset, init_globals->buffer_.get(), size, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kReadSparseData: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::ReadSparseData& rsd =
            command.read_sparse_data();
        auto entry_it = GetNextValue(&open_cache_entries_, rsd.entry_id());
        if (!IsValidEntry(&entry_it->second) ||
            !sparse_entry_tracker_[entry_it->second.entry_ptr])
          continue;

        uint64_t offset = rsd.offset();
        if (rsd.cap_offset())
          offset %= kMaxEntrySize;
        size_t size = rsd.size() % kMaxEntrySize;
        bool async = rsd.async();
        scoped_refptr<net::IOBuffer> buffer =
            base::MakeRefCounted<net::IOBuffer>(size);

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::ReadSparseData);

        MAYBE_PRINT << "ReadSparseData(\""
                    << entry_it->second.entry_ptr->GetKey()
                    << "\", offset = " << offset << ", size = " << size << ")"
                    << std::flush;
        int rv = entry_it->second.entry_ptr->ReadSparseData(
            offset, buffer.get(), size, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomAllEntries: {
        if (!cache_)
          continue;
        bool async = command.doom_all_entries().async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::DoomAllEntries);
        MAYBE_PRINT << "DoomAllEntries()" << std::flush;
        int rv = cache_->DoomAllEntries(std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kFlushQueueForTest: {
        // Blockfile-cache specific method.
        if (!block_impl_)
          return;

        net::TestCompletionCallback cb;
        MAYBE_PRINT << "FlushQueueForTest()" << std::endl;
        int rv = block_impl_->FlushQueueForTest(cb.callback());
        CHECK_EQ(cb.GetResult(rv), net::OK);
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kCreateIterator: {
        if (!cache_)
          continue;
        uint64_t it_id = command.create_iterator().it_id();
        MAYBE_PRINT << "CreateIterator(), id = " << it_id << std::endl;
        open_iterators_[it_id] = cache_->CreateIterator();
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kIteratorOpenNextEntry: {
        const disk_cache_fuzzer::IteratorOpenNextEntry& ione =
            command.iterator_open_next_entry();

        uint64_t it_id = ione.it_id();
        uint64_t entry_id = ione.entry_id();
        bool async = ione.async();

        if (open_iterators_.empty())
          continue;

        if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
          continue;  // Don't overwrite a currently open cache entry.

        auto iterator_it = GetNextValue(&open_iterators_, it_id);

        EntryInfo* entry_info = &open_cache_entries_[entry_id];

        entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
        disk_cache::EntryResultCallback cb =
            base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
                           base::Unretained(this), entry_id, async, false);

        MAYBE_PRINT << "Iterator(" << ione.it_id()
                    << ").OpenNextEntry() = " << std::flush;
        disk_cache::EntryResult result =
            iterator_it->second->OpenNextEntry(std::move(cb));
        if (!async || result.net_error() != net::ERR_IO_PENDING) {
          result = WaitOnEntry(entry_info, std::move(result));
          int rv = result.net_error();
          entry_info->entry_ptr = result.ReleaseEntry();
          // Print return value, and key if applicable.
          if (!entry_info->entry_ptr) {
            MAYBE_PRINT << rv << std::endl;
          } else {
            MAYBE_PRINT << rv << ", key = " << entry_info->entry_ptr->GetKey()
                        << std::endl;
          }
        } else {
          MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kFastForwardBy: {
        base::TimeDelta to_wait =
            base::Milliseconds(command.fast_forward_by().capped_num_millis() %
                               kMaxNumMillisToWait);
        MAYBE_PRINT << "FastForwardBy(" << to_wait << ")" << std::endl;
        init_globals->task_environment_->FastForwardBy(to_wait);

        base::Time curr_time = base::Time::Now();
        saved_times_[command.fast_forward_by().time_id()] = curr_time;
        // MAYBE_PRINT << "Saved time " << curr_time << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomEntriesSince: {
        if (!cache_)
          continue;
        // App cache does not keep track of LRU timestamps so this method
        // cannot be used.
        if (type == net::APP_CACHE)
          continue;
        if (saved_times_.empty())
          continue;

        const disk_cache_fuzzer::DoomEntriesSince& des =
            command.doom_entries_since();
        auto time_it = GetNextValue(&saved_times_, des.time_id());
        bool async = des.async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::DoomEntriesSince);

        MAYBE_PRINT << "DoomEntriesSince(" << time_it->second << ")"
                    << std::flush;
        int rv = cache_->DoomEntriesSince(time_it->second, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomEntriesBetween: {
        if (!cache_)
          continue;
        // App cache does not keep track of LRU timestamps so this method
        // cannot be used.
        if (type == net::APP_CACHE)
          continue;
        if (saved_times_.empty())
          continue;

        const disk_cache_fuzzer::DoomEntriesBetween& deb =
            command.doom_entries_between();
        auto time_it1 = GetNextValue(&saved_times_, deb.time_id1());
        auto time_it2 = GetNextValue(&saved_times_, deb.time_id2());
        base::Time time1 = time_it1->second;
        base::Time time2 = time_it2->second;
        if (time1 > time2)
          std::swap(time1, time2);
        bool async = deb.async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::DoomEntriesBetween);

        MAYBE_PRINT << "DoomEntriesBetween(" << time1 << ", " << time2 << ")"
                    << std::flush;
        int rv = cache_->DoomEntriesBetween(time1, time2, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kOnExternalCacheHit: {
        if (!cache_)
          continue;
        if (created_cache_entries_.empty())
          continue;

        uint64_t key_id = command.on_external_cache_hit().key_id();

        auto key_it = GetNextValue(&created_cache_entries_, key_id);
        MAYBE_PRINT << "OnExternalCacheHit(\"" << key_it->second << "\")"
                    << std::endl;
        cache_->OnExternalCacheHit(key_it->second);
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kTrimForTest: {
        // Blockfile-cache specific method.
        if (!block_impl_ || type != net::DISK_CACHE)
          return;

        MAYBE_PRINT << "TrimForTest()" << std::endl;

        RunTaskForTest(base::BindOnce(&disk_cache::BackendImpl::TrimForTest,
                                      base::Unretained(block_impl_),
                                      command.trim_for_test().empty()));
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kTrimDeletedListForTest: {
        // Blockfile-cache specific method.
        if (!block_impl_ || type != net::DISK_CACHE)
          return;

        MAYBE_PRINT << "TrimDeletedListForTest()" << std::endl;

        RunTaskForTest(
            base::BindOnce(&disk_cache::BackendImpl::TrimDeletedListForTest,
                           base::Unretained(block_impl_),
                           command.trim_deleted_list_for_test().empty()));
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kGetAvailableRange: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::GetAvailableRange& gar =
            command.get_available_range();
        auto entry_it = GetNextValue(&open_cache_entries_, gar.entry_id());
        if (!IsValidEntry(&entry_it->second) ||
            !sparse_entry_tracker_[entry_it->second.entry_ptr])
          continue;

        disk_cache::Entry* entry = entry_it->second.entry_ptr;
        uint32_t offset = gar.offset() % kMaxEntrySize;
        uint32_t len = gar.len() % kMaxEntrySize;
        bool async = gar.async();

        auto result_checker = base::BindRepeating(
            [](net::CompletionOnceCallback callback, uint32_t offset,
               uint32_t len, const disk_cache::RangeResult& result) {
              std::move(callback).Run(result.net_error);

              if (result.net_error <= 0)
                return;

              // Make sure that the result is contained in what was
              // requested. It doesn't have to be the same even if there was
              // an exact corresponding write, since representation of ranges
              // may be imprecise, and here we don't know that there was.

              // No overflow thanks to % kMaxEntrySize.
              net::Interval<uint32_t> requested(offset, offset + len);

              uint32_t range_start, range_end;
              base::CheckedNumeric<uint64_t> range_start64(result.start);
              CHECK(range_start64.AssignIfValid(&range_start));
              base::CheckedNumeric<uint64_t> range_end64 =
                  range_start + result.available_len;
              CHECK(range_end64.AssignIfValid(&range_end));
              net::Interval<uint32_t> gotten(range_start, range_end);

              CHECK(requested.Contains(gotten));
            },
            GetIOCallback(IOType::GetAvailableRange), offset, len);

        TestRangeResultCompletionCallback tcb;
        disk_cache::RangeResultCallback cb =
            !async ? tcb.callback() : result_checker;

        MAYBE_PRINT << "GetAvailableRange(\"" << entry->GetKey() << "\", "
                    << offset << ", " << len << ")" << std::flush;
        disk_cache::RangeResult result =
            entry->GetAvailableRange(offset, len, std::move(cb));

        if (result.net_error != net::ERR_IO_PENDING) {
          // Run the checker callback ourselves.
          result_checker.Run(result);
        } else if (!async) {
          // In this case the callback will be run by the backend, so we don't
          // need to do it manually.
          result = tcb.GetResult(result);
        }

        // Finally, take care of printing.
        if (async && result.net_error == net::ERR_IO_PENDING) {
          MAYBE_PRINT << " = net::ERR_IO_PENDING (async)" << std::endl;
        } else {
          MAYBE_PRINT << " = " << result.net_error
                      << ", start = " << result.start
                      << ", available_len = " << result.available_len;
          if (result.net_error < 0) {
            MAYBE_PRINT << ", error to string: "
                        << net::ErrorToShortString(result.net_error)
                        << std::endl;
          } else {
            MAYBE_PRINT << std::endl;
          }
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kCancelSparseIo: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::CancelSparseIO& csio =
            command.cancel_sparse_io();
        auto entry_it = GetNextValue(&open_cache_entries_, csio.entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        MAYBE_PRINT << "CancelSparseIO(\""
                    << entry_it->second.entry_ptr->GetKey() << "\")"
                    << std::endl;
        entry_it->second.entry_ptr->CancelSparseIO();
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomKey: {
        if (!cache_)
          continue;
        if (created_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::DoomKey& dk = command.doom_key();
        uint64_t key_id = dk.key_id();
        net::RequestPriority pri = GetRequestPriority(dk.pri());
        bool async = dk.async();

        auto key_it = GetNextValue(&created_cache_entries_, key_id);

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::DoomKey);

        MAYBE_PRINT << "DoomKey(\"" << key_it->second << "\")" << std::flush;
        int rv = cache_->DoomEntry(key_it->second, pri, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;

        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDestructBackend: {
        // |block_impl_| will leak if we destruct the backend without closing
        // previously opened entries.
        // TODO(mpdenton) consider creating a separate fuzz target that allows
        // closing the |block_impl_| and ignores leaks.
        if (block_impl_ || !cache_)
          continue;

        const disk_cache_fuzzer::DestructBackend& db =
            command.destruct_backend();
        // Only sometimes actually destruct the backend.
        if (!db.actually_destruct1() || !db.actually_destruct2())
          continue;

        MAYBE_PRINT << "~Backend(). Backend destruction." << std::endl;
        cache_.reset();
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kAddRealDelay: {
        if (!command.add_real_delay().actually_delay())
          continue;

        MAYBE_PRINT << "AddRealDelay(1ms)" << std::endl;
        base::PlatformThread::Sleep(base::Milliseconds(1));
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::FUZZ_COMMAND_ONEOF_NOT_SET: {
        continue;
        break;
      }
    }
  }
}

void DiskCacheLPMFuzzer::HandleSetMaxSize(
    const disk_cache_fuzzer::SetMaxSize& sms) {
  if (!cache_)
    return;

  max_size_ = sms.size();
  max_size_ %= kMaxSizeKB;
  max_size_ *= 1024;
  MAYBE_PRINT << "SetMaxSize(" << max_size_ << ")" << std::endl;
  if (simple_cache_impl_)
    CHECK_EQ(true, simple_cache_impl_->SetMaxSize(max_size_));

  if (block_impl_)
    CHECK_EQ(true, block_impl_->SetMaxSize(max_size_));

  if (mem_cache_)
    CHECK_EQ(true, mem_cache_->SetMaxSize(max_size_));
}

void DiskCacheLPMFuzzer::CreateBackend(
    disk_cache_fuzzer::FuzzCommands::CacheBackend cache_backend,
    uint32_t mask,
    net::CacheType type,
    bool simple_cache_wait_for_index) {
  if (cache_backend == disk_cache_fuzzer::FuzzCommands::IN_MEMORY) {
    MAYBE_PRINT << "Using in-memory cache." << std::endl;
    auto cache = std::make_unique<disk_cache::MemBackendImpl>(nullptr);
    mem_cache_ = cache.get();
    cache_ = std::move(cache);
    CHECK(cache_);
  } else if (cache_backend == disk_cache_fuzzer::FuzzCommands::SIMPLE) {
    MAYBE_PRINT << "Using simple cache." << std::endl;
    net::TestCompletionCallback cb;
    // We cap the number of open fds (kMaxFdsSimpleCache) since OS X by default
    // only gives us 256.
    // (Chrome raises the limit on startup, but the fuzzer doesn't).
    if (!simple_file_tracker_)
      simple_file_tracker_ =
          std::make_unique<disk_cache::SimpleFileTracker>(kMaxFdsSimpleCache);
    auto simple_backend = std::make_unique<disk_cache::SimpleBackendImpl>(
        /*file_operations=*/nullptr, cache_path_,
        /*cleanup_tracker=*/nullptr, simple_file_tracker_.get(), max_size_,
        type, /*net_log=*/nullptr);
    simple_backend->Init(cb.callback());
    CHECK_EQ(cb.WaitForResult(), net::OK);
    simple_cache_impl_ = simple_backend.get();
    cache_ = std::move(simple_backend);

    if (simple_cache_wait_for_index) {
      MAYBE_PRINT << "Waiting for simple cache index to be ready..."
                  << std::endl;
      net::TestCompletionCallback wait_for_index_cb;
      simple_cache_impl_->index()->ExecuteWhenReady(
          wait_for_index_cb.callback());
      int rv = wait_for_index_cb.WaitForResult();
      CHECK_EQ(rv, net::OK);
    }
  } else {
    MAYBE_PRINT << "Using blockfile cache";
    std::unique_ptr<disk_cache::BackendImpl> cache;
    if (mask) {
      MAYBE_PRINT << ", mask = " << mask << std::endl;
      cache = std::make_unique<disk_cache::BackendImpl>(
          cache_path_, mask,
          /* runner = */ nullptr, type,
          /* net_log = */ nullptr);
    } else {
      MAYBE_PRINT << "." << std::endl;
      cache = std::make_unique<disk_cache::BackendImpl>(
          cache_path_,
          /* cleanup_tracker = */ nullptr,
          /* runner = */ nullptr, type,
          /* net_log = */ nullptr);
    }
    block_impl_ = cache.get();
    cache_ = std::move(cache);
    CHECK(cache_);
    // TODO(mpdenton) kNoRandom or not? It does a lot of waiting for IO. May be
    // good for avoiding leaks but tests a less realistic cache.
    // block_impl_->SetFlags(disk_cache::kNoRandom);

    // TODO(mpdenton) should I always wait here?
    net::TestCompletionCallback cb;
    block_impl_->Init(cb.callback());
    CHECK_EQ(cb.WaitForResult(), net::OK);
  }
}

void DiskCacheLPMFuzzer::CloseAllRemainingEntries() {
  for (auto& entry_info : open_cache_entries_) {
    disk_cache::Entry** entry_ptr = &entry_info.second.entry_ptr;
    if (!*entry_ptr)
      continue;
    MAYBE_PRINT << "Destructor CloseEntry(\"" << (*entry_ptr)->GetKey() << "\")"
                << std::endl;
    (*entry_ptr)->Close();
    *entry_ptr = nullptr;
  }
}

DiskCacheLPMFuzzer::~DiskCacheLPMFuzzer() {
  // |block_impl_| leaks a lot more if we don't close entries before
  // destructing the backend.
  if (block_impl_) {
    // TODO(mpdenton) Consider creating a fuzz target that does not wait for
    // blockfile, and also does not detect leaks.

    // Because the blockfile backend will leak any entries closed after its
    // destruction, we need to wait for any remaining backend callbacks to
    // finish. Otherwise, there will always be a race between handling
    // callbacks with RunUntilIdle() and actually closing all of the remaining
    // entries. And, closing entries after destructing the backend will not
    // work and cause leaks.
    for (auto& entry_it : open_cache_entries_) {
      if (entry_it.second.tcb) {
        WaitOnEntry(&entry_it.second);
      }
    }

    // Destroy any open iterators before destructing the backend so we don't
    // cause leaks. TODO(mpdenton) should maybe be documented?
    // Also *must* happen after waiting for all OpenNextEntry callbacks to
    // finish, because destructing the iterators may cause those callbacks to
    // be cancelled, which will cause WaitOnEntry() to spin forever waiting.
    // TODO(mpdenton) should also be documented?
    open_iterators_.clear();
    // Just in case, finish any callbacks.
    init_globals->task_environment_->RunUntilIdle();
    // Close all entries that haven't been closed yet.
    CloseAllRemainingEntries();
    // Destroy the backend.
    cache_.reset();
  } else {
    // Here we won't bother with waiting for our OpenEntry* callbacks.
    cache_.reset();
    // Finish any callbacks that came in before backend destruction.
    init_globals->task_environment_->RunUntilIdle();
    // Close all entries that haven't been closed yet.
    CloseAllRemainingEntries();
  }

  // Make sure any tasks triggered by the CloseEntry's have run.
  init_globals->task_environment_->RunUntilIdle();
  if (simple_cache_impl_)
    CHECK(simple_file_tracker_->IsEmptyForTesting());
  base::RunLoop().RunUntilIdle();

  DeleteCache(cache_path_);
}

DEFINE_BINARY_PROTO_FUZZER(const disk_cache_fuzzer::FuzzCommands& commands) {
  {
    DiskCacheLPMFuzzer disk_cache_fuzzer_instance;
    disk_cache_fuzzer_instance.RunCommands(commands);
  }
  MAYBE_PRINT << "-----------------------" << std::endl;
}