1 // Copyright 2019 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <cinttypes>
6 #include <cstdlib>
7 #include <iostream>
8 #include <map>
9 #include <memory>
10 #include <string>
11
12 #include "base/at_exit.h"
13 #include "base/command_line.h"
14 #include "base/files/file_path.h"
15 #include "base/files/file_util.h"
16 #include "base/files/scoped_temp_dir.h"
17 #include "base/functional/callback.h"
18 #include "base/logging.h"
19 #include "base/memory/raw_ptr.h"
20 #include "base/memory/raw_ptr_exclusion.h"
21 #include "base/memory/ref_counted.h"
22 #include "base/memory/scoped_refptr.h"
23 #include "base/numerics/checked_math.h"
24 #include "base/strings/string_number_conversions.h"
25 #include "base/test/task_environment.h"
26 #include "base/test/test_timeouts.h"
27 #include "base/time/time.h"
28 #include "net/base/cache_type.h"
29 #include "net/base/interval.h"
30 #include "net/base/io_buffer.h"
31 #include "net/base/net_errors.h"
32 #include "net/base/test_completion_callback.h"
33 #include "net/disk_cache/backend_cleanup_tracker.h"
34 #include "net/disk_cache/blockfile/backend_impl.h"
35 #include "net/disk_cache/disk_cache.h"
36 #include "net/disk_cache/disk_cache_fuzzer.pb.h"
37 #include "net/disk_cache/disk_cache_test_util.h"
38 #include "net/disk_cache/memory/mem_backend_impl.h"
39 #include "net/disk_cache/simple/simple_backend_impl.h"
40 #include "net/disk_cache/simple/simple_file_tracker.h"
41 #include "net/disk_cache/simple/simple_index.h"
42 #include "testing/libfuzzer/proto/lpm_interface.h"
43
44 // To get a good idea of what a test case is doing, just run the libfuzzer
45 // target with LPM_DUMP_NATIVE_INPUT=1 prefixed. This will trigger all the
46 // prints below and will convey exactly what the test case is doing: use this
47 // instead of trying to print the protobuf as text.
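// For example (output directory and test case path below are illustrative):
//   LPM_DUMP_NATIVE_INPUT=1 out/Default/disk_cache_lpm_fuzzer /path/to/testcase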
48
49 // For code coverage:
50 // python ./tools/code_coverage/coverage.py disk_cache_lpm_fuzzer -b
51 // out/coverage -o out/report -c 'out/coverage/disk_cache_lpm_fuzzer
52 // -runs=0 -workers=24 corpus_disk_cache_simple' -f net/disk_cache
53
54 void IOCallback(std::string io_type, int rv);
55
56 namespace {
57 const uint32_t kMaxSizeKB = 128; // 128KB maximum.
58 const uint32_t kMaxSize = kMaxSizeKB * 1024;
59 const uint32_t kMaxEntrySize = kMaxSize * 2;
60 const uint32_t kNumStreams = 3;  // All caches seem to have 3 streams.
61 // TODO: do other specialized caches have this?
62 const uint64_t kFirstSavedTime =
63 5; // Totally random number chosen by dice roll. ;)
64 const uint32_t kMaxNumMillisToWait = 2019;
65 const int kMaxFdsSimpleCache = 10;
66
67 // Known colliding key values taken from SimpleCacheCreateCollision unittest.
68 const std::string kCollidingKey1 =
69 "\xfb\x4e\x9c\x1d\x66\x71\xf7\x54\xa3\x11\xa0\x7e\x16\xa5\x68\xf6";
70 const std::string kCollidingKey2 =
71 "\xbc\x60\x64\x92\xbc\xa0\x5c\x15\x17\x93\x29\x2d\xe4\x21\xbd\x03";
72
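// X-macro listing every asynchronous IO operation the fuzzer issues. It is
// expanded below to declare the IOType enum, and again in InitGlobals() to
// build one logging callback per operation, so an IOType value can index
// io_callbacks_ directly (see GetIOCallback()).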
73 #define IOTYPES_APPLY(F) \
74 F(WriteData) \
75 F(ReadData) \
76 F(WriteSparseData) \
77 F(ReadSparseData) \
78 F(DoomAllEntries) \
79 F(DoomEntriesSince) \
80 F(DoomEntriesBetween) \
81 F(GetAvailableRange) \
82 F(DoomKey)
83
84 enum class IOType {
85 #define ENUM_ENTRY(IO_TYPE) IO_TYPE,
86 IOTYPES_APPLY(ENUM_ENTRY)
87 #undef ENUM_ENTRY
88 };
89
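// Process-wide state, constructed exactly once via the init_globals pointer
// below and never destroyed, so the TaskEnvironment and the shared write
// buffer persist across fuzzer iterations.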
90 struct InitGlobals {
91 InitGlobals() {
92 base::CommandLine::Init(0, nullptr);
93
94 print_comms_ = ::getenv("LPM_DUMP_NATIVE_INPUT");
95
96 // TaskEnvironment requires TestTimeouts initialization to watch for
97 // problematic long-running tasks.
98 TestTimeouts::Initialize();
99
100 // Mark this thread as an IO_THREAD with MOCK_TIME, and ensure that Now()
101 // is driven from the same mock clock.
102 task_environment_ = std::make_unique<base::test::TaskEnvironment>(
103 base::test::TaskEnvironment::MainThreadType::IO,
104 base::test::TaskEnvironment::TimeSource::MOCK_TIME);
105
106 // Disable noisy logging as per "libFuzzer in Chrome" documentation:
107 // testing/libfuzzer/getting_started.md#Disable-noisy-error-message-logging.
108 logging::SetMinLogLevel(logging::LOGGING_FATAL);
109
110 // Re-using this buffer for write operations may technically be against
111 // IOBuffer rules but it shouldn't cause any actual problems.
112 buffer_ = base::MakeRefCounted<net::IOBufferWithSize>(
113 static_cast<size_t>(kMaxEntrySize));
114 CacheTestFillBuffer(buffer_->span(), false);
115
116 #define CREATE_IO_CALLBACK(IO_TYPE) \
117 io_callbacks_.push_back(base::BindRepeating(&IOCallback, #IO_TYPE));
118 IOTYPES_APPLY(CREATE_IO_CALLBACK)
119 #undef CREATE_IO_CALLBACK
120 }
121
122 // This allows us to mock time for all threads.
123 std::unique_ptr<base::test::TaskEnvironment> task_environment_;
124
125 // Used as a pre-filled buffer for all writes.
126 scoped_refptr<net::IOBuffer> buffer_;
127
128 // Should we print debugging info?
129 bool print_comms_;
130
131 // List of IO callbacks. They do nothing (except maybe print) but are used by
132 // all async entry operations.
133 std::vector<base::RepeatingCallback<void(int)>> io_callbacks_;
134 };
135
136 InitGlobals* init_globals = new InitGlobals();
137 } // namespace
138
139 class DiskCacheLPMFuzzer {
140 public:
141 DiskCacheLPMFuzzer() {
142 CHECK(temp_dir_.CreateUniqueTempDir());
143 cache_path_ = temp_dir_.GetPath();
144 }
145
146 ~DiskCacheLPMFuzzer();
147
148 void RunCommands(const disk_cache_fuzzer::FuzzCommands& commands);
149
150 private:
151 struct EntryInfo {
152 EntryInfo() = default;
153
154 EntryInfo(const EntryInfo&) = delete;
155 EntryInfo& operator=(const EntryInfo&) = delete;
156
157 // RAW_PTR_EXCLUSION: #addr-of
158 RAW_PTR_EXCLUSION disk_cache::Entry* entry_ptr = nullptr;
159 std::unique_ptr<TestEntryResultCompletionCallback> tcb;
160 };
161 void RunTaskForTest(base::OnceClosure closure);
162
163 // Waits for an entry to be ready. Should only be called if there is a pending
164 // callback for this entry, i.e. ei->tcb != nullptr.
165 // Also takes the result that the cache entry creation functions return; does
166 // not wait if result.net_error() != net::ERR_IO_PENDING (in which case the
167 // callback would never have been called).
168 disk_cache::EntryResult WaitOnEntry(
169 EntryInfo* ei,
170 disk_cache::EntryResult result =
171 disk_cache::EntryResult::MakeError(net::ERR_IO_PENDING));
172
173 // Used as a callback for entry-opening backend calls. Will record the entry
174 // in the map as usable and will release any entry-specific calls waiting for
175 // the entry to be ready.
176 void OpenCacheEntryCallback(uint64_t entry_id,
177 bool async,
178 bool set_is_sparse,
179 disk_cache::EntryResult result);
180
181 // Waits for the entry to finish opening, in the async case. Then, if the
182 // entry was opened successfully (the callback returned net::OK, or it was
183 // already open), checks whether entry_ptr == nullptr. If it is, the entry
184 // has since been closed.
185 bool IsValidEntry(EntryInfo* ei);
186
187 // Closes any non-nullptr entries in open_cache_entries_.
188 void CloseAllRemainingEntries();
189
190 // Fully shuts down and cleans up the cache backend.
191 void ShutdownBackend();
192
193 int64_t ComputeMaxSize(const disk_cache_fuzzer::SetMaxSize* maybe_max_size);
194 void CreateBackend(
195 disk_cache_fuzzer::FuzzCommands::CacheBackend cache_backend,
196 uint32_t mask,
197 const disk_cache_fuzzer::SetMaxSize* maybe_max_size,
198 net::CacheType type,
199 bool simple_cache_wait_for_index);
200
201 // Places to keep our cache files.
202 base::FilePath cache_path_;
203 base::ScopedTempDir temp_dir_;
204
205 // Pointers to our backend. Only one of block_impl_, simple_cache_impl_, and
206 // mem_cache_ is active at a time.
207 std::unique_ptr<disk_cache::Backend> cache_;
208 raw_ptr<disk_cache::BackendImpl> block_impl_ = nullptr;
209 std::unique_ptr<disk_cache::SimpleFileTracker> simple_file_tracker_;
210 raw_ptr<disk_cache::SimpleBackendImpl> simple_cache_impl_ = nullptr;
211 raw_ptr<disk_cache::MemBackendImpl> mem_cache_ = nullptr;
212
213 // This "consistent hash table" keeps track of the keys we've added to the
214 // backend so far. This should always be indexed by a "key_id" from a
215 // protobuf.
216 std::map<uint64_t, std::string> created_cache_entries_;
217 // This "consistent hash table" keeps track of all opened entries we have from
218 // the backend, and also contains some nullptr's where entries were already
219 // closed. This should always be indexed by an "entry_id" from a protobuf.
220 // When destructed, we close all entries that are still open in order to avoid
221 // memory leaks.
222 std::map<uint64_t, EntryInfo> open_cache_entries_;
223 // This "consistent hash table" keeps track of all times we have saved, so
224 // that we can call backend methods like DoomEntriesSince or
225 // DoomEntriesBetween with sane timestamps. This should always be indexed by a
226 // "time_id" from a protobuf.
227 std::map<uint64_t, base::Time> saved_times_;
228 // This "consistent hash table" keeps track of all the iterators we have open
229 // from the backend. This should always be indexed by an "it_id" from a
230 // protobuf.
231 std::map<uint64_t, std::unique_ptr<disk_cache::Backend::Iterator>>
232 open_iterators_;
233
234 // This map keeps track of the sparsity of each entry, keyed by entry pointer.
235 // TODO(mpdenton) remove if CreateEntry("Key0"); WriteData("Key0", index = 2,
236 // ...); WriteSparseData("Key0", ...); is supposed to be valid.
237 // Then we can just use CouldBeSparse before the WriteData.
238 std::map<disk_cache::Entry*, bool> sparse_entry_tracker_;
239 };
240
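// Writes to stdout only when LPM_DUMP_NATIVE_INPUT is set (see print_comms_).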
241 #define MAYBE_PRINT \
242 if (init_globals->print_comms_) \
243 std::cout
244
245 inline base::RepeatingCallback<void(int)> GetIOCallback(IOType iot) {
246 return init_globals->io_callbacks_[static_cast<int>(iot)];
247 }
248
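// Maps a fuzzer-chosen key id to a cache key string. Most ids produce a unique
// padded key, e.g. ToKey(3) == "KeyAAA3"; ids ending in 98 or 99 reuse the
// known-colliding keys above to exercise hash-collision handling.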
249 std::string ToKey(uint64_t key_num) {
250 // Use one of the two known-colliding key values for about 2% of key ids.
251 if (key_num % 100 == 99)
252 return kCollidingKey1;
253 if (key_num % 100 == 98)
254 return kCollidingKey2;
255
256 // Otherwise, use a value based on the key id and fuzzy padding.
257 std::string padding(key_num & 0xFFFF, 'A');
258 return "Key" + padding + base::NumberToString(key_num);
259 }
260
261 net::RequestPriority GetRequestPriority(
262 disk_cache_fuzzer::RequestPriority lpm_pri) {
263 CHECK(net::MINIMUM_PRIORITY <= static_cast<int>(lpm_pri) &&
264 static_cast<int>(lpm_pri) <= net::MAXIMUM_PRIORITY);
265 return static_cast<net::RequestPriority>(lpm_pri);
266 }
267
268 net::CacheType GetCacheTypeAndPrint(
269 disk_cache_fuzzer::FuzzCommands::CacheType type,
270 disk_cache_fuzzer::FuzzCommands::CacheBackend backend) {
271 switch (type) {
272 case disk_cache_fuzzer::FuzzCommands::APP_CACHE:
273 MAYBE_PRINT << "Cache type = APP_CACHE." << std::endl;
274 return net::CacheType::APP_CACHE;
275 case disk_cache_fuzzer::FuzzCommands::REMOVED_MEDIA_CACHE:
276 // Media cache is no longer in use; handled as DISK_CACHE.
277 MAYBE_PRINT << "Cache type = REMOVED_MEDIA_CACHE." << std::endl;
278 return net::CacheType::DISK_CACHE;
279 case disk_cache_fuzzer::FuzzCommands::SHADER_CACHE:
280 MAYBE_PRINT << "Cache type = SHADER_CACHE." << std::endl;
281 return net::CacheType::SHADER_CACHE;
282 case disk_cache_fuzzer::FuzzCommands::PNACL_CACHE:
283 // Simple cache won't handle PNACL_CACHE.
284 if (backend == disk_cache_fuzzer::FuzzCommands::SIMPLE) {
285 MAYBE_PRINT << "Cache type = DISK_CACHE." << std::endl;
286 return net::CacheType::DISK_CACHE;
287 }
288 MAYBE_PRINT << "Cache type = PNACL_CACHE." << std::endl;
289 return net::CacheType::PNACL_CACHE;
290 case disk_cache_fuzzer::FuzzCommands::GENERATED_BYTE_CODE_CACHE:
291 MAYBE_PRINT << "Cache type = GENERATED_BYTE_CODE_CACHE." << std::endl;
292 return net::CacheType::GENERATED_BYTE_CODE_CACHE;
293 case disk_cache_fuzzer::FuzzCommands::GENERATED_NATIVE_CODE_CACHE:
294 MAYBE_PRINT << "Cache type = GENERATED_NATIVE_CODE_CACHE." << std::endl;
295 return net::CacheType::GENERATED_NATIVE_CODE_CACHE;
296 case disk_cache_fuzzer::FuzzCommands::DISK_CACHE:
297 MAYBE_PRINT << "Cache type = DISK_CACHE." << std::endl;
298 return net::CacheType::DISK_CACHE;
299 }
300 }
301
302 void IOCallback(std::string io_type, int rv) {
303 MAYBE_PRINT << " [Async IO (" << io_type << ") = " << rv << "]" << std::endl;
304 }
305
306 /*
307 * Consistent hashing inspired map for fuzzer state.
308 * If we stored open cache entries in a hash table mapping cache_entry_id ->
309 * disk_cache::Entry*, then it would be highly unlikely that any subsequent
310 * "CloseEntry" or "WriteData" etc. command would come up with an ID that would
311 * correspond to a valid entry in the hash table. The optimal solution is for
312 * libfuzzer to generate CloseEntry commands with an ID that matches the ID of a
313 * previous OpenEntry command. But libfuzzer is stateless and should stay that
314 * way.
315 *
316 * On the other hand, if we stored entries in a vector, and on a CloseEntry
317 * command we took the entry at CloseEntry.id % (size of entries vector), we
318 * would always generate correct CloseEntries. This is good, but all
319 * dumb/general minimization techniques stop working, because deleting a single
320 * OpenEntry command changes the indexes of every entry in the vector from then
321 * on.
322 *
323 * So, we use something that's more stable for minimization: consistent hashing.
324 * Basically, when we see a CloseEntry.id, we take the entry in the table with
325 * the smallest id that is >= CloseEntry.id (wrapping around when there is none).
326 *
327 * This makes us resilient to deleting irrelevant OpenEntry commands. But, if we
328 * delete from the table on CloseEntry commands, we still screw up all the
329 * indexes during minimization. We'll get around this by not deleting entries
330 * after CloseEntry commands, but that will result in a slightly less efficient
331 * fuzzer, as if there are many closed entries in the table, many of the *Entry
332 * commands will be useless. It seems like a decent balance between generating
333 * useful fuzz commands and effective minimization.
334 */
335 template <typename T>
336 typename std::map<uint64_t, T>::iterator GetNextValue(
337 typename std::map<uint64_t, T>* entries,
338 uint64_t val) {
339 auto iter = entries->lower_bound(val);
340 if (iter != entries->end())
341 return iter;
342 // Wrap to 0
343 iter = entries->lower_bound(0);
344 if (iter != entries->end())
345 return iter;
346
347 return entries->end();
348 }
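// Example: with ids {2, 7, 9} in the map, val = 8 selects the entry with id 9
// (the smallest id >= 8), and val = 10 wraps around and selects id 2.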
349
350 void DiskCacheLPMFuzzer::RunTaskForTest(base::OnceClosure closure) {
351 if (!block_impl_) {
352 std::move(closure).Run();
353 return;
354 }
355
356 net::TestCompletionCallback cb;
357 int rv = block_impl_->RunTaskForTest(std::move(closure), cb.callback());
358 CHECK_EQ(cb.GetResult(rv), net::OK);
359 }
360
361 // Resets the cb in the map so that WriteData and other calls that work on an
362 // entry don't wait for its result.
363 void DiskCacheLPMFuzzer::OpenCacheEntryCallback(
364 uint64_t entry_id,
365 bool async,
366 bool set_is_sparse,
367 disk_cache::EntryResult result) {
368 // TODO(mpdenton) if this fails should we delete the entry entirely?
369 // Would need to mark it for deletion and delete it later, as
370 // IsValidEntry might be waiting for it.
371 EntryInfo* ei = &open_cache_entries_[entry_id];
372
373 if (async) {
374 int rv = result.net_error();
375 ei->entry_ptr = result.ReleaseEntry();
376 // We are responsible for setting things up.
377 if (set_is_sparse && ei->entry_ptr) {
378 sparse_entry_tracker_[ei->entry_ptr] = true;
379 }
380 if (ei->entry_ptr) {
381 MAYBE_PRINT << " [Async opening of cache entry for \""
382 << ei->entry_ptr->GetKey() << "\" callback (rv = " << rv
383 << ")]" << std::endl;
384 }
385 // Unblock any subsequent ops waiting for this --- they don't care about
386 // the actual return value, but use something distinctive for debugging.
387 ei->tcb->callback().Run(
388 disk_cache::EntryResult::MakeError(net::ERR_FILE_VIRUS_INFECTED));
389 } else {
390 // The operation code will pull the result out of the completion callback,
391 // so hand it to it.
392 ei->tcb->callback().Run(std::move(result));
393 }
394 }
395
396 disk_cache::EntryResult DiskCacheLPMFuzzer::WaitOnEntry(
397 EntryInfo* ei,
398 disk_cache::EntryResult result) {
399 CHECK(ei->tcb);
400 result = ei->tcb->GetResult(std::move(result));
401
402 // Reset the callback so nobody accidentally waits on a callback that never
403 // comes.
404 ei->tcb.reset();
405 return result;
406 }
407
408 bool DiskCacheLPMFuzzer::IsValidEntry(EntryInfo* ei) {
409 if (ei->tcb) {
410 // If we have a callback, we are the first to access this async-created
411 // entry. Wait for it, and then delete it so nobody waits on it again.
412 WaitOnEntry(ei);
413 }
414 // entry_ptr will be nullptr if the entry has been closed.
415 return ei->entry_ptr != nullptr;
416 }
417
418 /*
419 * Async implementation:
420 1. RunUntilIdle at the top of the loop to handle any callbacks we've been
421 posted from the backend thread.
422 2. Only the entry creation functions have important callbacks. The good thing
423 is backend destruction will cancel these operations. The entry creation
424 functions simply need to keep the entry_ptr* alive until the callback is
425 posted, and then need to make sure the entry_ptr is added to the map in order
426 to Close it in the destructor.
427 As for iterators, it's unclear whether closing an iterator will cancel
428 callbacks.
429
430 Problem: WriteData (and similar) calls will fail on the entry_id until the
431 callback happens. So, I should probably delay these calls or otherwise will
432 have very unreliable test cases. These are the options:
433 1. Queue up WriteData (etc.) calls in some map, such that when the OpenEntry
434 callback runs, the WriteData calls will all run.
435 2. Just sit there and wait for the entry to be ready.
436
437 #2 is probably best as it doesn't prevent any interesting cases and is much
438 simpler.
439 */
440
441 void DiskCacheLPMFuzzer::RunCommands(
442 const disk_cache_fuzzer::FuzzCommands& commands) {
443 // Skip overly long command sequences; they are counterproductive for fuzzing.
444 // The number was chosen empirically using the existing fuzzing corpus.
445 if (commands.fuzz_commands_size() > 129)
446 return;
447
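// Optional table mask for the blockfile backend; 0 means "not set" and lets
// the backend pick its default (see CreateBackend()).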
448 uint32_t mask =
449 commands.has_set_mask() ? (commands.set_mask() ? 0x1 : 0xf) : 0;
450 net::CacheType type =
451 GetCacheTypeAndPrint(commands.cache_type(), commands.cache_backend());
452 CreateBackend(
453 commands.cache_backend(), mask,
454 commands.has_set_max_size() ? &commands.set_max_size() : nullptr, type,
455 commands.simple_cache_wait_for_index());
456 MAYBE_PRINT << "CreateBackend()" << std::endl;
457
458 {
459 base::Time curr_time = base::Time::Now();
460 saved_times_[kFirstSavedTime] = curr_time;
461 // MAYBE_PRINT << "Saved initial time " << curr_time << std::endl;
462 }
463
464 for (const disk_cache_fuzzer::FuzzCommand& command :
465 commands.fuzz_commands()) {
466 // Handle any callbacks that other threads may have posted to us in the
467 // meantime, so any successful async OpenEntry's (etc.) add their
468 // entry_ptr's to the map.
469 init_globals->task_environment_->RunUntilIdle();
470
471 switch (command.fuzz_command_oneof_case()) {
472 case disk_cache_fuzzer::FuzzCommand::kCreateEntry: {
473 if (!cache_)
474 continue;
475
476 const disk_cache_fuzzer::CreateEntry& ce = command.create_entry();
477 uint64_t key_id = ce.key_id();
478 uint64_t entry_id = ce.entry_id();
479 net::RequestPriority pri = GetRequestPriority(ce.pri());
480 bool async = ce.async();
481 bool is_sparse = ce.is_sparse();
482
483 if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
484 continue; // Don't overwrite a currently open cache entry.
485
486 std::string key_str = ToKey(key_id);
487 created_cache_entries_[key_id] = key_str;
488
489 EntryInfo* entry_info = &open_cache_entries_[entry_id];
490
491 entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
492 disk_cache::EntryResultCallback cb =
493 base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
494 base::Unretained(this), entry_id, async, is_sparse);
495
496 MAYBE_PRINT << "CreateEntry(\"" << key_str
497 << "\", set_is_sparse = " << is_sparse
498 << ") = " << std::flush;
499 disk_cache::EntryResult result =
500 cache_->CreateEntry(key_str, pri, std::move(cb));
501 if (!async || result.net_error() != net::ERR_IO_PENDING) {
502 result = WaitOnEntry(entry_info, std::move(result));
503 int rv = result.net_error();
504
505 // Ensure we mark sparsity and save the entry if the callback never ran.
506 if (rv == net::OK) {
507 entry_info->entry_ptr = result.ReleaseEntry();
508 sparse_entry_tracker_[entry_info->entry_ptr] = is_sparse;
509 }
510 MAYBE_PRINT << rv << std::endl;
511 } else {
512 MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
513 }
514 break;
515 }
516 case disk_cache_fuzzer::FuzzCommand::kOpenEntry: {
517 if (!cache_)
518 continue;
519
520 const disk_cache_fuzzer::OpenEntry& oe = command.open_entry();
521 uint64_t key_id = oe.key_id();
522 uint64_t entry_id = oe.entry_id();
523 net::RequestPriority pri = GetRequestPriority(oe.pri());
524 bool async = oe.async();
525
526 if (created_cache_entries_.empty())
527 continue;
528
529 if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
530 continue; // Don't overwrite a currently open cache entry.
531
532 EntryInfo* entry_info = &open_cache_entries_[entry_id];
533
534 entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
535 disk_cache::EntryResultCallback cb =
536 base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
537 base::Unretained(this), entry_id, async, false);
538
539 auto key_it = GetNextValue(&created_cache_entries_, key_id);
540 MAYBE_PRINT << "OpenEntry(\"" << key_it->second
541 << "\") = " << std::flush;
542 disk_cache::EntryResult result =
543 cache_->OpenEntry(key_it->second, pri, std::move(cb));
544 if (!async || result.net_error() != net::ERR_IO_PENDING) {
545 result = WaitOnEntry(entry_info, std::move(result));
546 int rv = result.net_error();
547 if (rv == net::OK)
548 entry_info->entry_ptr = result.ReleaseEntry();
549 MAYBE_PRINT << rv << std::endl;
550 } else {
551 MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
552 }
553 break;
554 }
555 case disk_cache_fuzzer::FuzzCommand::kOpenOrCreateEntry: {
556 if (!cache_)
557 continue;
558
559 const disk_cache_fuzzer::OpenOrCreateEntry& ooce =
560 command.open_or_create_entry();
561 uint64_t key_id = ooce.key_id();
562 uint64_t entry_id = ooce.entry_id();
563 net::RequestPriority pri = GetRequestPriority(ooce.pri());
564 bool async = ooce.async();
565 bool is_sparse = ooce.is_sparse();
566
567 if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
568 continue; // Don't overwrite a currently open cache entry.
569
570 std::string key_str;
571 // If our proto tells us to create a new entry, create a new entry, just
572 // with OpenOrCreateEntry.
573 if (ooce.create_new()) {
574 // Use a possibly new key.
575 key_str = ToKey(key_id);
576 created_cache_entries_[key_id] = key_str;
577 } else {
578 if (created_cache_entries_.empty())
579 continue;
580 auto key_it = GetNextValue(&created_cache_entries_, key_id);
581 key_str = key_it->second;
582 }
583
584 // Setup for callbacks.
585
586 EntryInfo* entry_info = &open_cache_entries_[entry_id];
587
588 entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
589 disk_cache::EntryResultCallback cb =
590 base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
591 base::Unretained(this), entry_id, async, is_sparse);
592
593 // Will only be set as sparse if it is created and not opened.
594 MAYBE_PRINT << "OpenOrCreateEntry(\"" << key_str
595 << "\", set_is_sparse = " << is_sparse
596 << ") = " << std::flush;
597 disk_cache::EntryResult result =
598 cache_->OpenOrCreateEntry(key_str, pri, std::move(cb));
599 if (!async || result.net_error() != net::ERR_IO_PENDING) {
600 result = WaitOnEntry(entry_info, std::move(result));
601 int rv = result.net_error();
602 bool opened = result.opened();
603 entry_info->entry_ptr = result.ReleaseEntry();
604 // Ensure we mark sparsity, even if the callback never ran.
605 if (rv == net::OK && !opened)
606 sparse_entry_tracker_[entry_info->entry_ptr] = is_sparse;
607 MAYBE_PRINT << rv << ", opened = " << opened << std::endl;
608 } else {
609 MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
610 }
611 break;
612 }
613 case disk_cache_fuzzer::FuzzCommand::kCloseEntry: {
614 if (open_cache_entries_.empty())
615 continue;
616
617 auto entry_it = GetNextValue(&open_cache_entries_,
618 command.close_entry().entry_id());
619 if (!IsValidEntry(&entry_it->second))
620 continue;
621
622 MAYBE_PRINT << "CloseEntry(\"" << entry_it->second.entry_ptr->GetKey()
623 << "\")" << std::endl;
624 entry_it->second.entry_ptr->Close();
625
626 // Set the entry_ptr to nullptr to ensure no one uses it anymore.
627 entry_it->second.entry_ptr = nullptr;
628 break;
629 }
630 case disk_cache_fuzzer::FuzzCommand::kDoomEntry: {
631 if (open_cache_entries_.empty())
632 continue;
633
634 auto entry_it =
635 GetNextValue(&open_cache_entries_, command.doom_entry().entry_id());
636 if (!IsValidEntry(&entry_it->second))
637 continue;
638
639 MAYBE_PRINT << "DoomEntry(\"" << entry_it->second.entry_ptr->GetKey()
640 << "\")" << std::endl;
641 entry_it->second.entry_ptr->Doom();
642 break;
643 }
644 case disk_cache_fuzzer::FuzzCommand::kWriteData: {
645 if (open_cache_entries_.empty())
646 continue;
647
648 const disk_cache_fuzzer::WriteData& wd = command.write_data();
649 auto entry_it = GetNextValue(&open_cache_entries_, wd.entry_id());
650 if (!IsValidEntry(&entry_it->second))
651 continue;
652
653 int index = 0; // if it's sparse, these non-sparse aware streams must
654 // read from stream 0 according to the spec.
655 // Implementations might have weaker constraints.
656 if (!sparse_entry_tracker_[entry_it->second.entry_ptr])
657 index = wd.index() % kNumStreams;
658 uint32_t offset = wd.offset() % kMaxEntrySize;
659 size_t size = wd.size() % kMaxEntrySize;
660 bool async = wd.async();
661
662 net::TestCompletionCallback tcb;
663 net::CompletionOnceCallback cb =
664 !async ? tcb.callback() : GetIOCallback(IOType::WriteData);
665
666 MAYBE_PRINT << "WriteData(\"" << entry_it->second.entry_ptr->GetKey()
667 << "\", index = " << index << ", offset = " << offset
668 << ", size = " << size << ", truncate = " << wd.truncate()
669 << ")" << std::flush;
670 int rv = entry_it->second.entry_ptr->WriteData(
671 index, offset, init_globals->buffer_.get(), size, std::move(cb),
672 wd.truncate());
673 if (!async)
674 rv = tcb.GetResult(rv);
675 MAYBE_PRINT << " = " << rv << std::endl;
676 break;
677 }
678 case disk_cache_fuzzer::FuzzCommand::kReadData: {
679 if (open_cache_entries_.empty())
680 continue;
681
682 const disk_cache_fuzzer::ReadData& wd = command.read_data();
683 auto entry_it = GetNextValue(&open_cache_entries_, wd.entry_id());
684 if (!IsValidEntry(&entry_it->second))
685 continue;
686
687 int index = 0; // if it's sparse, these non-sparse aware streams must
688 // read from stream 0 according to the spec.
689 // Implementations might have weaker constraints.
690 if (!sparse_entry_tracker_[entry_it->second.entry_ptr])
691 index = wd.index() % kNumStreams;
692 uint32_t offset = wd.offset() % kMaxEntrySize;
693 size_t size = wd.size() % kMaxEntrySize;
694 bool async = wd.async();
695 auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(size);
696
697 net::TestCompletionCallback tcb;
698 net::CompletionOnceCallback cb =
699 !async ? tcb.callback() : GetIOCallback(IOType::ReadData);
700
701 MAYBE_PRINT << "ReadData(\"" << entry_it->second.entry_ptr->GetKey()
702 << "\", index = " << index << ", offset = " << offset
703 << ", size = " << size << ")" << std::flush;
704 int rv = entry_it->second.entry_ptr->ReadData(
705 index, offset, buffer.get(), size, std::move(cb));
706 if (!async)
707 rv = tcb.GetResult(rv);
708 MAYBE_PRINT << " = " << rv << std::endl;
709 break;
710 }
711 case disk_cache_fuzzer::FuzzCommand::kWriteSparseData: {
712 if (open_cache_entries_.empty())
713 continue;
714
715 const disk_cache_fuzzer::WriteSparseData& wsd =
716 command.write_sparse_data();
717 auto entry_it = GetNextValue(&open_cache_entries_, wsd.entry_id());
718 if (!IsValidEntry(&entry_it->second) ||
719 !sparse_entry_tracker_[entry_it->second.entry_ptr])
720 continue;
721
722 uint64_t offset = wsd.offset();
723 if (wsd.cap_offset())
724 offset %= kMaxEntrySize;
725 size_t size = wsd.size() % kMaxEntrySize;
726 bool async = wsd.async();
727
728 net::TestCompletionCallback tcb;
729 net::CompletionOnceCallback cb =
730 !async ? tcb.callback() : GetIOCallback(IOType::WriteSparseData);
731 MAYBE_PRINT << "WriteSparseData(\""
732 << entry_it->second.entry_ptr->GetKey()
733 << "\", offset = " << offset << ", size = " << size << ")"
734 << std::flush;
735 int rv = entry_it->second.entry_ptr->WriteSparseData(
736 offset, init_globals->buffer_.get(), size, std::move(cb));
737 if (!async)
738 rv = tcb.GetResult(rv);
739 MAYBE_PRINT << " = " << rv << std::endl;
740 break;
741 }
742 case disk_cache_fuzzer::FuzzCommand::kReadSparseData: {
743 if (open_cache_entries_.empty())
744 continue;
745
746 const disk_cache_fuzzer::ReadSparseData& rsd =
747 command.read_sparse_data();
748 auto entry_it = GetNextValue(&open_cache_entries_, rsd.entry_id());
749 if (!IsValidEntry(&entry_it->second) ||
750 !sparse_entry_tracker_[entry_it->second.entry_ptr])
751 continue;
752
753 uint64_t offset = rsd.offset();
754 if (rsd.cap_offset())
755 offset %= kMaxEntrySize;
756 size_t size = rsd.size() % kMaxEntrySize;
757 bool async = rsd.async();
758 auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(size);
759
760 net::TestCompletionCallback tcb;
761 net::CompletionOnceCallback cb =
762 !async ? tcb.callback() : GetIOCallback(IOType::ReadSparseData);
763
764 MAYBE_PRINT << "ReadSparseData(\""
765 << entry_it->second.entry_ptr->GetKey()
766 << "\", offset = " << offset << ", size = " << size << ")"
767 << std::flush;
768 int rv = entry_it->second.entry_ptr->ReadSparseData(
769 offset, buffer.get(), size, std::move(cb));
770 if (!async)
771 rv = tcb.GetResult(rv);
772 MAYBE_PRINT << " = " << rv << std::endl;
773 break;
774 }
775 case disk_cache_fuzzer::FuzzCommand::kDoomAllEntries: {
776 if (!cache_)
777 continue;
778 bool async = command.doom_all_entries().async();
779
780 net::TestCompletionCallback tcb;
781 net::CompletionOnceCallback cb =
782 !async ? tcb.callback() : GetIOCallback(IOType::DoomAllEntries);
783 MAYBE_PRINT << "DoomAllEntries()" << std::flush;
784 int rv = cache_->DoomAllEntries(std::move(cb));
785 if (!async)
786 rv = tcb.GetResult(rv);
787 MAYBE_PRINT << " = " << rv << std::endl;
788 break;
789 }
790 case disk_cache_fuzzer::FuzzCommand::kFlushQueueForTest: {
791 // Blockfile-cache specific method.
792 if (!block_impl_)
793 return;
794
795 net::TestCompletionCallback cb;
796 MAYBE_PRINT << "FlushQueueForTest()" << std::endl;
797 int rv = block_impl_->FlushQueueForTest(cb.callback());
798 CHECK_EQ(cb.GetResult(rv), net::OK);
799 break;
800 }
801 case disk_cache_fuzzer::FuzzCommand::kCreateIterator: {
802 if (!cache_)
803 continue;
804 uint64_t it_id = command.create_iterator().it_id();
805 MAYBE_PRINT << "CreateIterator(), id = " << it_id << std::endl;
806 open_iterators_[it_id] = cache_->CreateIterator();
807 break;
808 }
809 case disk_cache_fuzzer::FuzzCommand::kIteratorOpenNextEntry: {
810 const disk_cache_fuzzer::IteratorOpenNextEntry& ione =
811 command.iterator_open_next_entry();
812
813 uint64_t it_id = ione.it_id();
814 uint64_t entry_id = ione.entry_id();
815 bool async = ione.async();
816
817 if (open_iterators_.empty())
818 continue;
819
820 if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
821 continue; // Don't overwrite a currently
822 // open cache entry.
823
824 auto iterator_it = GetNextValue(&open_iterators_, it_id);
825
826 EntryInfo* entry_info = &open_cache_entries_[entry_id];
827
828 entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
829 disk_cache::EntryResultCallback cb =
830 base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
831 base::Unretained(this), entry_id, async, false);
832
833 MAYBE_PRINT << "Iterator(" << ione.it_id()
834 << ").OpenNextEntry() = " << std::flush;
835 disk_cache::EntryResult result =
836 iterator_it->second->OpenNextEntry(std::move(cb));
837 if (!async || result.net_error() != net::ERR_IO_PENDING) {
838 result = WaitOnEntry(entry_info, std::move(result));
839 int rv = result.net_error();
840 entry_info->entry_ptr = result.ReleaseEntry();
841 // Print return value, and key if applicable.
842 if (!entry_info->entry_ptr) {
843 MAYBE_PRINT << rv << std::endl;
844 } else {
845 MAYBE_PRINT << rv << ", key = " << entry_info->entry_ptr->GetKey()
846 << std::endl;
847 }
848 } else {
849 MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
850 }
851 break;
852 }
853 case disk_cache_fuzzer::FuzzCommand::kFastForwardBy: {
854 base::TimeDelta to_wait =
855 base::Milliseconds(command.fast_forward_by().capped_num_millis() %
856 kMaxNumMillisToWait);
857 MAYBE_PRINT << "FastForwardBy(" << to_wait << ")" << std::endl;
858 init_globals->task_environment_->FastForwardBy(to_wait);
859
860 base::Time curr_time = base::Time::Now();
861 saved_times_[command.fast_forward_by().time_id()] = curr_time;
862 // MAYBE_PRINT << "Saved time " << curr_time << std::endl;
863 break;
864 }
865 case disk_cache_fuzzer::FuzzCommand::kDoomEntriesSince: {
866 if (!cache_)
867 continue;
868 // App cache does not keep track of LRU timestamps so this method cannot
869 // be used.
870 if (type == net::APP_CACHE)
871 continue;
872 if (saved_times_.empty())
873 continue;
874
875 const disk_cache_fuzzer::DoomEntriesSince& des =
876 command.doom_entries_since();
877 auto time_it = GetNextValue(&saved_times_, des.time_id());
878 bool async = des.async();
879
880 net::TestCompletionCallback tcb;
881 net::CompletionOnceCallback cb =
882 !async ? tcb.callback() : GetIOCallback(IOType::DoomEntriesSince);
883
884 MAYBE_PRINT << "DoomEntriesSince(" << time_it->second << ")"
885 << std::flush;
886 int rv = cache_->DoomEntriesSince(time_it->second, std::move(cb));
887 if (!async)
888 rv = tcb.GetResult(rv);
889 MAYBE_PRINT << " = " << rv << std::endl;
890 break;
891 }
892 case disk_cache_fuzzer::FuzzCommand::kDoomEntriesBetween: {
893 if (!cache_)
894 continue;
895 // App cache does not keep track of LRU timestamps so this method cannot
896 // be used.
897 if (type == net::APP_CACHE)
898 continue;
899 if (saved_times_.empty())
900 continue;
901
902 const disk_cache_fuzzer::DoomEntriesBetween& deb =
903 command.doom_entries_between();
904 auto time_it1 = GetNextValue(&saved_times_, deb.time_id1());
905 auto time_it2 = GetNextValue(&saved_times_, deb.time_id2());
906 base::Time time1 = time_it1->second;
907 base::Time time2 = time_it2->second;
908 if (time1 > time2)
909 std::swap(time1, time2);
910 bool async = deb.async();
911
912 net::TestCompletionCallback tcb;
913 net::CompletionOnceCallback cb =
914 !async ? tcb.callback() : GetIOCallback(IOType::DoomEntriesBetween);
915
916 MAYBE_PRINT << "DoomEntriesBetween(" << time1 << ", " << time2 << ")"
917 << std::flush;
918 int rv = cache_->DoomEntriesBetween(time1, time2, std::move(cb));
919 if (!async)
920 rv = tcb.GetResult(rv);
921 MAYBE_PRINT << " = " << rv << std::endl;
922 break;
923 }
924 case disk_cache_fuzzer::FuzzCommand::kOnExternalCacheHit: {
925 if (!cache_)
926 continue;
927 if (created_cache_entries_.empty())
928 continue;
929
930 uint64_t key_id = command.on_external_cache_hit().key_id();
931
932 auto key_it = GetNextValue(&created_cache_entries_, key_id);
933 MAYBE_PRINT << "OnExternalCacheHit(\"" << key_it->second << "\")"
934 << std::endl;
935 cache_->OnExternalCacheHit(key_it->second);
936 break;
937 }
938 case disk_cache_fuzzer::FuzzCommand::kTrimForTest: {
939 // Blockfile-cache specific method.
940 if (!block_impl_ || type != net::DISK_CACHE)
941 return;
942
943 MAYBE_PRINT << "TrimForTest()" << std::endl;
944
945 RunTaskForTest(base::BindOnce(&disk_cache::BackendImpl::TrimForTest,
946 base::Unretained(block_impl_),
947 command.trim_for_test().empty()));
948 break;
949 }
950 case disk_cache_fuzzer::FuzzCommand::kTrimDeletedListForTest: {
951 // Blockfile-cache specific method.
952 if (!block_impl_ || type != net::DISK_CACHE)
953 return;
954
955 MAYBE_PRINT << "TrimDeletedListForTest()" << std::endl;
956
957 RunTaskForTest(
958 base::BindOnce(&disk_cache::BackendImpl::TrimDeletedListForTest,
959 base::Unretained(block_impl_),
960 command.trim_deleted_list_for_test().empty()));
961 break;
962 }
963 case disk_cache_fuzzer::FuzzCommand::kGetAvailableRange: {
964 if (open_cache_entries_.empty())
965 continue;
966
967 const disk_cache_fuzzer::GetAvailableRange& gar =
968 command.get_available_range();
969 auto entry_it = GetNextValue(&open_cache_entries_, gar.entry_id());
970 if (!IsValidEntry(&entry_it->second) ||
971 !sparse_entry_tracker_[entry_it->second.entry_ptr])
972 continue;
973
974 disk_cache::Entry* entry = entry_it->second.entry_ptr;
975 uint32_t offset = gar.offset() % kMaxEntrySize;
976 uint32_t len = gar.len() % kMaxEntrySize;
977 bool async = gar.async();
978
979 auto result_checker = base::BindRepeating(
980 [](net::CompletionOnceCallback callback, uint32_t offset,
981 uint32_t len, const disk_cache::RangeResult& result) {
982 std::move(callback).Run(result.net_error);
983
984 if (result.net_error <= 0)
985 return;
986
987 // Make sure that the result is contained in what was
988 // requested. It doesn't have to be the same even if there was
989 // an exact corresponding write, since representation of ranges
990 // may be imprecise, and here we don't know that there was.
991
992 // No overflow thanks to % kMaxEntrySize.
993 net::Interval<uint32_t> requested(offset, offset + len);
994
995 uint32_t range_start, range_end;
996 base::CheckedNumeric<uint64_t> range_start64(result.start);
997 CHECK(range_start64.AssignIfValid(&range_start));
998 base::CheckedNumeric<uint64_t> range_end64 =
999 range_start + result.available_len;
1000 CHECK(range_end64.AssignIfValid(&range_end));
1001 net::Interval<uint32_t> gotten(range_start, range_end);
1002
1003 CHECK(requested.Contains(gotten));
1004 },
1005 GetIOCallback(IOType::GetAvailableRange), offset, len);
1006
1007 TestRangeResultCompletionCallback tcb;
1008 disk_cache::RangeResultCallback cb =
1009 !async ? tcb.callback() : result_checker;
1010
1011 MAYBE_PRINT << "GetAvailableRange(\"" << entry->GetKey() << "\", "
1012 << offset << ", " << len << ")" << std::flush;
1013 disk_cache::RangeResult result =
1014 entry->GetAvailableRange(offset, len, std::move(cb));
1015
1016 if (result.net_error != net::ERR_IO_PENDING) {
1017 // Run the checker callback ourselves.
1018 result_checker.Run(result);
1019 } else if (!async) {
1020 // In this case the callback will be run by the backend, so we don't
1021 // need to do it manually.
1022 result = tcb.GetResult(result);
1023 }
1024
1025 // Finally, take care of printing.
1026 if (async && result.net_error == net::ERR_IO_PENDING) {
1027 MAYBE_PRINT << " = net::ERR_IO_PENDING (async)" << std::endl;
1028 } else {
1029 MAYBE_PRINT << " = " << result.net_error
1030 << ", start = " << result.start
1031 << ", available_len = " << result.available_len;
1032 if (result.net_error < 0) {
1033 MAYBE_PRINT << ", error to string: "
1034 << net::ErrorToShortString(result.net_error)
1035 << std::endl;
1036 } else {
1037 MAYBE_PRINT << std::endl;
1038 }
1039 }
1040 break;
1041 }
1042 case disk_cache_fuzzer::FuzzCommand::kCancelSparseIo: {
1043 if (open_cache_entries_.empty())
1044 continue;
1045
1046 const disk_cache_fuzzer::CancelSparseIO& csio =
1047 command.cancel_sparse_io();
1048 auto entry_it = GetNextValue(&open_cache_entries_, csio.entry_id());
1049 if (!IsValidEntry(&entry_it->second))
1050 continue;
1051
1052 MAYBE_PRINT << "CancelSparseIO(\""
1053 << entry_it->second.entry_ptr->GetKey() << "\")"
1054 << std::endl;
1055 entry_it->second.entry_ptr->CancelSparseIO();
1056 break;
1057 }
1058 case disk_cache_fuzzer::FuzzCommand::kDoomKey: {
1059 if (!cache_)
1060 continue;
1061 if (created_cache_entries_.empty())
1062 continue;
1063
1064 const disk_cache_fuzzer::DoomKey& dk = command.doom_key();
1065 uint64_t key_id = dk.key_id();
1066 net::RequestPriority pri = GetRequestPriority(dk.pri());
1067 bool async = dk.async();
1068
1069 auto key_it = GetNextValue(&created_cache_entries_, key_id);
1070
1071 net::TestCompletionCallback tcb;
1072 net::CompletionOnceCallback cb =
1073 !async ? tcb.callback() : GetIOCallback(IOType::DoomKey);
1074
1075 MAYBE_PRINT << "DoomKey(\"" << key_it->second << "\")" << std::flush;
1076 int rv = cache_->DoomEntry(key_it->second, pri, std::move(cb));
1077 if (!async)
1078 rv = tcb.GetResult(rv);
1079 MAYBE_PRINT << " = " << rv << std::endl;
1080
1081 break;
1082 }
1083 case disk_cache_fuzzer::FuzzCommand::kDestructBackend: {
1084 // block_impl_ will leak if we destruct the backend without closing
1085 // previously opened entries.
1086 // TODO(mpdenton) consider creating a separate fuzz target that allows
1087 // closing the |block_impl_| and ignore leaks.
1088 if (block_impl_ || !cache_)
1089 continue;
1090
1091 const disk_cache_fuzzer::DestructBackend& db =
1092 command.destruct_backend();
1093 // Only sometimes actually destruct the backend.
1094 if (!db.actually_destruct1() || !db.actually_destruct2())
1095 continue;
1096
1097 MAYBE_PRINT << "~Backend(). Backend destruction." << std::endl;
1098 cache_.reset();
1099 break;
1100 }
1101 case disk_cache_fuzzer::FuzzCommand::kRecreateWithSize: {
1102 if (!cache_) {
1103 continue;
1104 }
1105 MAYBE_PRINT << "RecreateWithSize("
1106 << command.recreate_with_size().size() << ")" << std::endl;
1107 ShutdownBackend();
1108 // Re-create the backend with the same config but a (potentially) different size.
1109 CreateBackend(commands.cache_backend(), mask,
1110 &command.recreate_with_size(), type,
1111 commands.simple_cache_wait_for_index());
1112 break;
1113 }
1114 case disk_cache_fuzzer::FuzzCommand::kAddRealDelay: {
1115 if (!command.add_real_delay().actually_delay())
1116 continue;
1117
1118 MAYBE_PRINT << "AddRealDelay(1ms)" << std::endl;
1119 base::PlatformThread::Sleep(base::Milliseconds(1));
1120 break;
1121 }
1122 case disk_cache_fuzzer::FuzzCommand::FUZZ_COMMAND_ONEOF_NOT_SET: {
1123 continue;
1124 }
1125 }
1126 }
1127 }
1128
1129 int64_t DiskCacheLPMFuzzer::ComputeMaxSize(
1130 const disk_cache_fuzzer::SetMaxSize* maybe_max_size) {
1131 if (!maybe_max_size) {
1132 return 0; // tell backend to use default.
1133 }
1134
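// Cap the requested size, e.g. a SetMaxSize of 200000 becomes
// (200000 % 128) * 1024 = 65536 bytes.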
1135 int64_t max_size = maybe_max_size->size();
1136 max_size %= kMaxSizeKB;
1137 max_size *= 1024;
1138 MAYBE_PRINT << "ComputeMaxSize(" << max_size << ")" << std::endl;
1139 return max_size;
1140 }
1141
1142 void DiskCacheLPMFuzzer::CreateBackend(
1143 disk_cache_fuzzer::FuzzCommands::CacheBackend cache_backend,
1144 uint32_t mask,
1145 const disk_cache_fuzzer::SetMaxSize* maybe_max_size,
1146 net::CacheType type,
1147 bool simple_cache_wait_for_index) {
1148 scoped_refptr<disk_cache::BackendCleanupTracker> cleanup_tracker;
1149
1150 if (cache_backend != disk_cache_fuzzer::FuzzCommands::IN_MEMORY) {
1151 // Make sure nothing is still messing with the directory.
1152 int count = 0;
1153 while (true) {
1154 ++count;
1155 CHECK_LT(count, 1000);
1156
1157 base::RunLoop run_dir_ready;
1158 cleanup_tracker = disk_cache::BackendCleanupTracker::TryCreate(
1159 cache_path_, run_dir_ready.QuitClosure());
1160 if (cleanup_tracker) {
1161 break;
1162 } else {
1163 run_dir_ready.Run();
1164 }
1165 }
1166 }
1167
1168 if (cache_backend == disk_cache_fuzzer::FuzzCommands::IN_MEMORY) {
1169 MAYBE_PRINT << "Using in-memory cache." << std::endl;
1170 auto cache = disk_cache::MemBackendImpl::CreateBackend(
1171 ComputeMaxSize(maybe_max_size), /*net_log=*/nullptr);
1172 mem_cache_ = cache.get();
1173 cache_ = std::move(cache);
1174 CHECK(cache_);
1175 } else if (cache_backend == disk_cache_fuzzer::FuzzCommands::SIMPLE) {
1176 MAYBE_PRINT << "Using simple cache." << std::endl;
1177 net::TestCompletionCallback cb;
1178 // We limit ourselves to kMaxFdsSimpleCache fds since OS X by default gives
1179 // us 256. (Chrome raises the number on startup, but the fuzzer doesn't.)
1180 if (!simple_file_tracker_)
1181 simple_file_tracker_ =
1182 std::make_unique<disk_cache::SimpleFileTracker>(kMaxFdsSimpleCache);
1183 auto simple_backend = std::make_unique<disk_cache::SimpleBackendImpl>(
1184 /*file_operations=*/nullptr, cache_path_, std::move(cleanup_tracker),
1185 simple_file_tracker_.get(), ComputeMaxSize(maybe_max_size), type,
1186 /*net_log=*/nullptr);
1187 simple_backend->Init(cb.callback());
1188 CHECK_EQ(cb.WaitForResult(), net::OK);
1189 simple_cache_impl_ = simple_backend.get();
1190 cache_ = std::move(simple_backend);
1191
1192 if (simple_cache_wait_for_index) {
1193 MAYBE_PRINT << "Waiting for simple cache index to be ready..."
1194 << std::endl;
1195 net::TestCompletionCallback wait_for_index_cb;
1196 simple_cache_impl_->index()->ExecuteWhenReady(
1197 wait_for_index_cb.callback());
1198 int rv = wait_for_index_cb.WaitForResult();
1199 CHECK_EQ(rv, net::OK);
1200 }
1201 } else {
1202 MAYBE_PRINT << "Using blockfile cache";
1203 std::unique_ptr<disk_cache::BackendImpl> cache;
1204 if (mask) {
1205 MAYBE_PRINT << ", mask = " << mask << std::endl;
1206 cache = std::make_unique<disk_cache::BackendImpl>(
1207 cache_path_, mask,
1208 /* cleanup_tracker = */ std::move(cleanup_tracker),
1209 /* runner = */ nullptr, type,
1210 /* net_log = */ nullptr);
1211 } else {
1212 MAYBE_PRINT << "." << std::endl;
1213 cache = std::make_unique<disk_cache::BackendImpl>(
1214 cache_path_,
1215 /* cleanup_tracker = */ std::move(cleanup_tracker),
1216 /* runner = */ nullptr, type,
1217 /* net_log = */ nullptr);
1218 }
1219 cache->SetMaxSize(ComputeMaxSize(maybe_max_size));
1220 block_impl_ = cache.get();
1221 cache_ = std::move(cache);
1222 CHECK(cache_);
1223 // TODO(mpdenton) kNoRandom or not? It does a lot of waiting for IO. May be
1224 // good for avoiding leaks but tests a less realistic cache.
1225 // block_impl_->SetFlags(disk_cache::kNoRandom);
1226
1227 // TODO(mpdenton) should I always wait here?
1228 net::TestCompletionCallback cb;
1229 block_impl_->Init(cb.callback());
1230 CHECK_EQ(cb.WaitForResult(), net::OK);
1231 }
1232 }
1233
1234 void DiskCacheLPMFuzzer::CloseAllRemainingEntries() {
1235 for (auto& entry_info : open_cache_entries_) {
1236 disk_cache::Entry** entry_ptr = &entry_info.second.entry_ptr;
1237 if (!*entry_ptr)
1238 continue;
1239 MAYBE_PRINT << "Destructor CloseEntry(\"" << (*entry_ptr)->GetKey() << "\")"
1240 << std::endl;
1241 (*entry_ptr)->Close();
1242 *entry_ptr = nullptr;
1243 }
1244 }
1245
1246 void DiskCacheLPMFuzzer::ShutdownBackend() {
1247 // |block_impl_| leaks a lot more if we don't close entries before destructing
1248 // the backend.
1249 if (block_impl_) {
1250 // TODO(mpdenton) Consider creating a fuzz target that does not wait for
1251 // blockfile, and also does not detect leaks.
1252
1253 // Because the blockfile backend will leak any entries closed after its
1254 // destruction, we need to wait for any remaining backend callbacks to
1255 // finish. Otherwise, there will always be a race between handling callbacks
1256 // with RunUntilIdle() and actually closing all of the remaining entries.
1257 // And, closing entries after destructing the backend will not work and
1258 // cause leaks.
1259 for (auto& entry_it : open_cache_entries_) {
1260 if (entry_it.second.tcb) {
1261 WaitOnEntry(&entry_it.second);
1262 }
1263 }
1264
1265 // Destroy any open iterators before destructing the backend so we don't
1266 // cause leaks. TODO(mpdenton) should maybe be documented?
1267 // Also *must* happen after waiting for all OpenNextEntry callbacks to
1268 // finish, because destructing the iterators may cause those callbacks to be
1269 // cancelled, which will cause WaitOnEntry() to spin forever waiting.
1270 // TODO(mpdenton) should also be documented?
1271 open_iterators_.clear();
1272 // Just in case, finish any callbacks.
1273 init_globals->task_environment_->RunUntilIdle();
1274 // Close all entries that haven't been closed yet.
1275 CloseAllRemainingEntries();
1276 // Destroy the backend.
1277 cache_.reset();
1278 } else {
1279 // Here we won't bother with waiting for our OpenEntry* callbacks.
1280 cache_.reset();
1281 // Finish any callbacks that came in before backend destruction.
1282 init_globals->task_environment_->RunUntilIdle();
1283 // Close all entries that haven't been closed yet.
1284 CloseAllRemainingEntries();
1285 }
1286
1287 // Make sure any tasks triggered by the CloseEntry's have run.
1288 init_globals->task_environment_->RunUntilIdle();
1289 if (simple_cache_impl_)
1290 CHECK(simple_file_tracker_->IsEmptyForTesting());
1291 base::RunLoop().RunUntilIdle();
1292 }
1293
1294 DiskCacheLPMFuzzer::~DiskCacheLPMFuzzer() {
1295 ShutdownBackend();
1296
1297 DeleteCache(cache_path_);
1298 }
1299
1300 DEFINE_BINARY_PROTO_FUZZER(const disk_cache_fuzzer::FuzzCommands& commands) {
1301 {
1302 DiskCacheLPMFuzzer disk_cache_fuzzer_instance;
1303 disk_cache_fuzzer_instance.RunCommands(commands);
1304 }
1305 MAYBE_PRINT << "-----------------------" << std::endl;
1306 }