• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifdef UNSAFE_BUFFERS_BUILD
6 // TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
7 #pragma allow_unsafe_buffers
8 #endif
9 
10 #include "net/http/mock_http_cache.h"
11 
12 #include <algorithm>
13 #include <limits>
14 #include <memory>
15 #include <utility>
16 
17 #include "base/feature_list.h"
18 #include "base/functional/bind.h"
19 #include "base/functional/callback.h"
20 #include "base/functional/callback_helpers.h"
21 #include "base/location.h"
22 #include "base/memory/raw_ptr.h"
23 #include "base/task/single_thread_task_runner.h"
24 #include "net/base/features.h"
25 #include "net/base/net_errors.h"
26 #include "net/disk_cache/disk_cache_test_util.h"
27 #include "net/http/http_cache_writers.h"
28 #include "testing/gtest/include/gtest/gtest.h"
29 
30 namespace net {
31 
32 namespace {
33 
// During testing, we are going to limit the size of a cache entry to this many
// bytes using DCHECKs in order to prevent a test from causing unbounded memory
// growth. In practice cache entry shouldn't come anywhere near this limit for
// tests that use the mock cache. If they do, that's likely a problem with the
// test. If a test requires using massive cache entries, they should use a real
// cache backend instead.
//
// constexpr (rather than const) makes this a guaranteed compile-time constant.
constexpr int kMaxMockCacheEntrySize = 100 * 1000 * 1000;

// We can override the test mode for a given operation by setting this global
// variable.
int g_test_mode = 0;
45 
GetTestModeForEntry(const std::string & key)46 int GetTestModeForEntry(const std::string& key) {
47   GURL url(HttpCache::GetResourceURLFromHttpCacheKey(key));
48   const MockTransaction* t = FindMockTransaction(url);
49   DCHECK(t);
50   return t->test_mode;
51 }
52 
53 }  // namespace
54 
55 //-----------------------------------------------------------------------------
56 
// Pairs a deferred callback with a reference to its entry so the entry stays
// alive until the callback is delivered (see StoreAndDeliverCallbacks).
struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  base::OnceClosure callback;
};
61 
// Creates an entry for |key|. Sync-vs-async completion behavior is taken from
// the MockTransaction registered for the key's URL.
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), max_file_size_(std::numeric_limits<int>::max()) {
  test_mode_ = GetTestModeForEntry(key);
}
66 
// Marks the entry as doomed; MockDiskCache drops doomed entries when they are
// next looked up.
void MockDiskEntry::Doom() {
  doomed_ = true;
}
70 
// Entries are ref-counted; closing just drops the consumer's reference.
void MockDiskEntry::Close() {
  Release();
}
74 
// Returns the cache key this entry was created with.
std::string MockDiskEntry::GetKey() const {
  return key_;
}
78 
// The mock does not track usage times; always reports "now".
base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::Now();
}
82 
// The mock does not track modification times; always reports "now".
base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::Now();
}
86 
// Returns the number of bytes currently stored in stream |index|.
int32_t MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32_t>(data_[index].size());
}
91 
ReadData(int index,int offset,IOBuffer * buf,int buf_len,CompletionOnceCallback callback)92 int MockDiskEntry::ReadData(int index,
93                             int offset,
94                             IOBuffer* buf,
95                             int buf_len,
96                             CompletionOnceCallback callback) {
97   DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
98   DCHECK(!callback.is_null());
99 
100   if (fail_requests_ & FAIL_READ) {
101     return ERR_CACHE_READ_FAILURE;
102   }
103 
104   if (offset < 0 || offset > static_cast<int>(data_[index].size())) {
105     return ERR_FAILED;
106   }
107   if (static_cast<size_t>(offset) == data_[index].size()) {
108     return 0;
109   }
110 
111   int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
112   memcpy(buf->data(), &data_[index][offset], num);
113 
114   if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) {
115     return num;
116   }
117 
118   // Pause and resume.
119   if (defer_op_ == DEFER_READ) {
120     defer_op_ = DEFER_NONE;
121     resume_callback_ = std::move(callback);
122     resume_return_code_ = num;
123     return ERR_IO_PENDING;
124   }
125 
126   CallbackLater(std::move(callback), num);
127   return ERR_IO_PENDING;
128 }
129 
// Delivers a previously deferred ReadData/WriteData completion (parked via
// DEFER_READ/DEFER_WRITE) with the result captured at defer time.
void MockDiskEntry::ResumeDiskEntryOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_), resume_return_code_);
  resume_return_code_ = 0;
}
135 
// Writes |buf_len| bytes to stream |index| at |offset|, growing the stream as
// needed. The mock only supports truncating writes. Completion is synchronous
// when TEST_MODE_SYNC_CACHE_WRITE is set; otherwise the callback is posted,
// or parked when a deferred write was requested.
int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_ & FAIL_WRITE) {
    // NOTE(review): a simulated write failure is reported asynchronously and
    // with ERR_CACHE_READ_FAILURE (not a write error). This looks odd but is
    // presumably what existing tests expect -- confirm before changing.
    CallbackLater(std::move(callback), ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size())) {
    return ERR_FAILED;
  }

  // Guards against tests accidentally growing an entry without bound.
  DCHECK_LT(offset + buf_len, kMaxMockCacheEntrySize);
  // The max-file-size limit is only enforced for stream 1.
  if (offset + buf_len > max_file_size_ && index == 1) {
    return ERR_FAILED;
  }

  data_[index].resize(offset + buf_len);
  if (buf_len) {
    memcpy(&data_[index][offset], buf->data(), buf_len);
  }

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) {
    return buf_len;
  }

  // Pause and resume: stash the completion for ResumeDiskEntryOperation().
  if (defer_op_ == DEFER_WRITE) {
    defer_op_ = DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = buf_len;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}
179 
// Reads sparse data from stream 1. Fails if sparse operations are disabled,
// an earlier sparse operation is still "in flight" (busy_), or sparse IO was
// cancelled. Async completion marks the entry busy so that RunCallback can
// delay delivery by one extra message-loop trip.
int MockDiskEntry::ReadSparseData(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_) {
    return ERR_NOT_IMPLEMENTED;
  }
  if (!sparse_ || busy_ || cancel_) {
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  }
  if (offset < 0) {
    return ERR_FAILED;
  }

  if (fail_requests_ & FAIL_READ_SPARSE) {
    return ERR_CACHE_READ_FAILURE;
  }

  // Offsets are kept within int range so they can index the backing vector.
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  if (!buf_len) {
    return 0;
  }

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) {
    return num;
  }

  CallbackLater(std::move(callback), num);
  busy_ = true;
  delayed_ = false;
  return ERR_IO_PENDING;
}
217 
// Writes sparse data into stream 1, growing it as needed. The first sparse
// write converts the entry to sparse mode, which is only allowed while
// stream 1 is still empty.
int MockDiskEntry::WriteSparseData(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_) {
    return ERR_NOT_IMPLEMENTED;
  }
  if (busy_ || cancel_) {
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  }
  if (!sparse_) {
    // Can't mix regular stream-1 data with sparse data.
    if (data_[1].size()) {
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    }
    sparse_ = true;
  }
  if (offset < 0) {
    return ERR_FAILED;
  }
  if (!buf_len) {
    return 0;
  }

  if (fail_requests_ & FAIL_WRITE_SPARSE) {
    // NOTE(review): reports a read-failure code for a sparse write failure --
    // presumably matches existing test expectations; confirm before changing.
    return ERR_CACHE_READ_FAILURE;
  }

  // Offsets are kept within int range so they can index the backing vector.
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len) {
    // Guards against tests accidentally growing an entry without bound.
    DCHECK_LT(real_offset + buf_len, kMaxMockCacheEntrySize);
    data_[1].resize(real_offset + buf_len);
  }

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) {
    return buf_len;
  }

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}
262 
// Reports the first contiguous run of non-zero bytes in stream 1 at or after
// |offset| (zero bytes are treated as "holes" in the sparse data). Returns the
// result synchronously under TEST_MODE_SYNC_CACHE_WRITE; otherwise posts
// |callback| and returns ERR_IO_PENDING.
disk_cache::RangeResult MockDiskEntry::GetAvailableRange(
    int64_t offset,
    int len,
    RangeResultCallback callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_ || cancel_) {
    return RangeResult(ERR_CACHE_OPERATION_NOT_SUPPORTED);
  }
  if (offset < 0) {
    return RangeResult(ERR_FAILED);
  }

  if (fail_requests_ & FAIL_GET_AVAILABLE_RANGE) {
    return RangeResult(ERR_CACHE_READ_FAILURE);
  }

  RangeResult result;
  result.net_error = OK;
  result.start = offset;
  result.available_len = 0;
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset) {
    // Nothing stored at or beyond |offset|: empty range.
    return result;
  }

  // Scan byte-by-byte: first find the start of a non-zero run, then extend it
  // until a zero byte or the end of the window.
  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  for (; num > 0; num--, real_offset++) {
    if (!result.available_len) {
      if (data_[1][real_offset]) {
        result.available_len++;
        result.start = real_offset;
      }
    } else {
      if (!data_[1][real_offset]) {
        break;
      }
      result.available_len++;
    }
  }
  // NOTE(review): the sync check uses TEST_MODE_SYNC_CACHE_WRITE even though
  // this is a read-type operation -- presumably intentional; confirm.
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) {
    return result;
  }

  CallbackLater(base::BindOnce(std::move(callback), result));
  return RangeResult(ERR_IO_PENDING);
}
310 
CouldBeSparse() const311 bool MockDiskEntry::CouldBeSparse() const {
312   if (fail_sparse_requests_) {
313     return false;
314   }
315   return sparse_;
316 }
317 
// Flags sparse IO as cancelled; cleared again by ReadyForSparseIO().
void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}
321 
// Clears a pending cancellation. Returns OK immediately when nothing was
// cancelled (or in sync mode); otherwise notifies |callback| asynchronously.
Error MockDiskEntry::ReadyForSparseIO(CompletionOnceCallback callback) {
  if (fail_sparse_requests_) {
    return ERR_NOT_IMPLEMENTED;
  }
  if (!cancel_) {
    return OK;
  }

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) {
    return OK;
  }

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass).  Just notify the caller that it finished.
  CallbackLater(std::move(callback), 0);
  return ERR_IO_PENDING;
}
341 
// Not supported: the mock does not track usage times (see GetLastUsed).
void MockDiskEntry::SetLastUsedTimeForTest(base::Time time) {
  NOTREACHED();
}
345 
// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false.  Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value) {
    return;
  }
  ignore_callbacks_ = value;
  // Re-enabling flushes every callback that was stored while disabled.
  if (!value) {
    StoreAndDeliverCallbacks(false, nullptr, base::OnceClosure());
  }
}
359 
MockDiskEntry::~MockDiskEntry() = default;
361 
// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry.  We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(base::OnceClosure callback) {
  // When callbacks are globally suppressed, park this one instead of posting.
  if (ignore_callbacks_) {
    return StoreAndDeliverCallbacks(true, this, std::move(callback));
  }
  // Binding |this| keeps the entry alive until RunCallback executes.
  base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE,
      base::BindOnce(&MockDiskEntry::RunCallback, this, std::move(callback)));
}
373 
// Convenience overload: curries |result| into the completion callback.
void MockDiskEntry::CallbackLater(CompletionOnceCallback callback, int result) {
  CallbackLater(base::BindOnce(std::move(callback), result));
}
377 
// Delivers |callback|, except that the first delivery attempt for a busy
// (sparse) entry is re-posted once to add an extra message-loop hop.
void MockDiskEntry::RunCallback(base::OnceClosure callback) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated.  What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots).  So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(std::move(callback));
    }
  }
  busy_ = false;
  std::move(callback).Run();
}
396 
// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                             MockDiskEntry* entry,
                                             base::OnceClosure callback) {
  // Function-local static so the queue survives across entries; CallbackInfo
  // holds a scoped_refptr, keeping each parked entry alive until delivery.
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, std::move(callback)};
    callback_list.push_back(std::move(c));
  } else {
    // |entry| and |callback| are ignored on the deliver path.
    for (auto& callback_info : callback_list) {
      callback_info.entry->CallbackLater(std::move(callback_info.callback));
    }
    callback_list.clear();
  }
}
414 
// Statics.
bool MockDiskEntry::ignore_callbacks_ = false;
417 
418 //-----------------------------------------------------------------------------
419 
// A DISK_CACHE-type backend with no file-size limit by default.
MockDiskCache::MockDiskCache()
    : Backend(DISK_CACHE), max_file_size_(std::numeric_limits<int>::max()) {}
422 
// Drops the cache's reference to every entry it still tracks.
MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}
426 
// Number of entries currently tracked (including doomed-but-not-yet-dropped).
int32_t MockDiskCache::GetEntryCount() const {
  return static_cast<int32_t>(entries_.size());
}
430 
// Opens |key| if it exists, otherwise creates it. |callback| is split because
// only one of the two sub-operations will consume it.
disk_cache::EntryResult MockDiskCache::OpenOrCreateEntry(
    const std::string& key,
    RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());

  if (force_fail_callback_later_) {
    // Simulate an asynchronous failure.
    CallbackLater(base::BindOnce(
        std::move(callback),
        EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_) {
    return EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE);
  }

  EntryResult result;

  // First try opening the entry.
  auto split_callback = base::SplitOnceCallback(std::move(callback));
  result = OpenEntry(key, request_priority, std::move(split_callback.first));
  if (result.net_error() == OK || result.net_error() == ERR_IO_PENDING) {
    return result;
  }

  // Unable to open, try creating the entry.
  result = CreateEntry(key, request_priority, std::move(split_callback.second));
  if (result.net_error() == OK || result.net_error() == ERR_IO_PENDING) {
    return result;
  }

  return EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE);
}
465 
// Opens an existing entry. Doomed entries are dropped (and the open fails).
// The returned entry carries an extra reference owned by the caller.
disk_cache::EntryResult MockDiskCache::OpenEntry(
    const std::string& key,
    RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    // Simulate an asynchronous failure.
    CallbackLater(base::BindOnce(
        std::move(callback), EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_) {
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);
  }

  auto it = entries_.find(key);
  if (it == entries_.end()) {
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);
  }

  // A doomed entry behaves as if it were already gone.
  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);
  }

  open_count_++;

  MockDiskEntry* entry = it->second;
  entry->AddRef();  // Reference transferred to the caller via EntryResult.

  // Propagate per-entry failure simulation; one-shot failures are consumed.
  if (soft_failures_ || soft_failures_one_instance_) {
    entry->set_fail_requests(soft_failures_ | soft_failures_one_instance_);
    soft_failures_one_instance_ = 0;
  }

  entry->set_max_file_size(max_file_size_);

  EntryResult result = EntryResult::MakeOpened(entry);
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) {
    return result;
  }

  CallbackLater(base::BindOnce(std::move(callback), std::move(result)));
  return EntryResult::MakeError(ERR_IO_PENDING);
}
512 
// Creates a new entry for |key|. Creating over a live (non-doomed) entry is a
// failure (fatal when double_create_check_ is set). The new entry gets two
// references: one held by |entries_| and one handed to the caller.
disk_cache::EntryResult MockDiskCache::CreateEntry(
    const std::string& key,
    RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    // Simulate an asynchronous failure.
    CallbackLater(base::BindOnce(
        std::move(callback), EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_) {
    return EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE);
  }

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_) {
        NOTREACHED();
      } else {
        return EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE);
      }
    }
    // Replace the doomed entry.
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  new_entry->AddRef();  // Reference owned by |entries_|.
  entries_[key] = new_entry;

  new_entry->AddRef();  // Reference transferred to the caller via EntryResult.

  // Propagate per-entry failure simulation; one-shot failures are consumed.
  if (soft_failures_ || soft_failures_one_instance_) {
    new_entry->set_fail_requests(soft_failures_ | soft_failures_one_instance_);
    soft_failures_one_instance_ = 0;
  }

  if (fail_sparse_requests_) {
    new_entry->set_fail_sparse_requests();
  }

  new_entry->set_max_file_size(max_file_size_);

  EntryResult result = EntryResult::MakeCreated(new_entry);
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) {
    return result;
  }

  // Pause and resume.
  if (defer_op_ == MockDiskEntry::DEFER_CREATE) {
    defer_op_ = MockDiskEntry::DEFER_NONE;
    resume_callback_ = base::BindOnce(std::move(callback), std::move(result));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  CallbackLater(base::BindOnce(std::move(callback), std::move(result)));
  return EntryResult::MakeError(ERR_IO_PENDING);
}
576 
// Removes |key| from the cache (dropping the cache's reference). Succeeds
// even if the entry does not exist, matching real backend semantics.
Error MockDiskCache::DoomEntry(const std::string& key,
                               RequestPriority request_priority,
                               CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    // Simulate an asynchronous failure.
    CallbackLater(base::BindOnce(std::move(callback), ERR_CACHE_DOOM_FAILURE));
    return ERR_IO_PENDING;
  }

  if (fail_requests_) {
    return ERR_CACHE_DOOM_FAILURE;
  }

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
    doomed_count_++;
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) {
    return OK;
  }

  CallbackLater(base::BindOnce(std::move(callback), OK));
  return ERR_IO_PENDING;
}
604 
// Not supported by the mock backend.
Error MockDiskCache::DoomAllEntries(CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
608 
// Not supported by the mock backend.
Error MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time,
                                        CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
614 
// Not supported by the mock backend.
Error MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                      CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
619 
// Not supported by the mock backend.
int64_t MockDiskCache::CalculateSizeOfAllEntries(
    Int64CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
624 
// Iterator stub: entry enumeration is not supported by the mock backend.
class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  EntryResult OpenNextEntry(EntryResultCallback callback) override {
    return EntryResult::MakeError(ERR_NOT_IMPLEMENTED);
  }
};
631 
// Returns an iterator whose every operation fails with ERR_NOT_IMPLEMENTED.
std::unique_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return std::make_unique<NotImplementedIterator>();
}
635 
// The mock backend reports no statistics.
void MockDiskCache::GetStats(base::StringPairs* stats) {}
637 
// Records the hit so tests can inspect it via GetExternalCacheHits().
void MockDiskCache::OnExternalCacheHit(const std::string& key) {
  external_cache_hits_.push_back(key);
}
641 
GetEntryInMemoryData(const std::string & key)642 uint8_t MockDiskCache::GetEntryInMemoryData(const std::string& key) {
643   if (!support_in_memory_entry_data_) {
644     return 0;
645   }
646 
647   auto it = entries_.find(key);
648   if (it != entries_.end()) {
649     return it->second->in_memory_data();
650   }
651   return 0;
652 }
653 
SetEntryInMemoryData(const std::string & key,uint8_t data)654 void MockDiskCache::SetEntryInMemoryData(const std::string& key, uint8_t data) {
655   auto it = entries_.find(key);
656   if (it != entries_.end()) {
657     it->second->set_in_memory_data(data);
658   }
659 }
660 
// Maximum per-entry file size (configurable by tests; unlimited by default).
int64_t MockDiskCache::MaxFileSize() const {
  return max_file_size_;
}
664 
ReleaseAll()665 void MockDiskCache::ReleaseAll() {
666   for (auto entry : entries_) {
667     entry.second->Release();
668   }
669   entries_.clear();
670 }
671 
// Posts |callback| to run on the current task runner (asynchronous delivery).
void MockDiskCache::CallbackLater(base::OnceClosure callback) {
  base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE, std::move(callback));
}
676 
IsDiskEntryDoomed(const std::string & key)677 bool MockDiskCache::IsDiskEntryDoomed(const std::string& key) {
678   auto it = entries_.find(key);
679   if (it != entries_.end()) {
680     return it->second->is_doomed();
681   }
682 
683   return false;
684 }
685 
// Delivers a CreateEntry completion that was parked via DEFER_CREATE.
void MockDiskCache::ResumeCacheOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_));
}
690 
// Returns an owning reference to the entry for |key|, or null if not tracked.
scoped_refptr<MockDiskEntry> MockDiskCache::GetDiskEntryRef(
    const std::string& key) {
  auto it = entries_.find(key);
  if (it == entries_.end()) {
    return nullptr;
  }
  return it->second.get();
}
699 
// Keys passed to OnExternalCacheHit(), in call order.
const std::vector<std::string>& MockDiskCache::GetExternalCacheHits() const {
  return external_cache_hits_;
}
703 
704 //-----------------------------------------------------------------------------
705 
// Synchronously produces a fresh MockDiskCache; |callback| is never used.
disk_cache::BackendResult MockBackendFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  return disk_cache::BackendResult::Make(std::make_unique<MockDiskCache>());
}
711 
712 //-----------------------------------------------------------------------------
713 
// Default construction uses the standard mock backend factory.
MockHttpCache::MockHttpCache()
    : MockHttpCache(std::make_unique<MockBackendFactory>()) {}
716 
// Builds a real HttpCache on top of a mock network layer and the supplied
// (usually mock) disk-cache factory.
MockHttpCache::MockHttpCache(
    std::unique_ptr<HttpCache::BackendFactory> disk_cache_factory)
    : http_cache_(std::make_unique<MockNetworkLayer>(),
                  std::move(disk_cache_factory)) {}
721 
// Synchronously obtains the cache's backend (spinning the loop if backend
// initialization is asynchronous). Returns null on failure.
disk_cache::Backend* MockHttpCache::backend() {
  TestGetBackendCompletionCallback cb;
  HttpCache::GetBackendResult result = http_cache_.GetBackend(cb.callback());
  result = cb.GetResult(result);
  return (result.first == OK) ? result.second : nullptr;
}
728 
// Convenience downcast; valid because this cache is built with a mock backend.
MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}
732 
// Creates a default-priority cache transaction; returns a net error code.
int MockHttpCache::CreateTransaction(std::unique_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}
736 
// Forwards to the cache's test hook for simulating a cache-lock timeout.
void MockHttpCache::SimulateCacheLockTimeout() {
  http_cache_.SimulateCacheLockTimeoutForTesting();
}
740 
// Same as above, but the timeout fires after headers have been received.
void MockHttpCache::SimulateCacheLockTimeoutAfterHeaders() {
  http_cache_.SimulateCacheLockTimeoutAfterHeadersForTesting();
}
744 
// Makes the cache fail conditionalization (revalidation) attempts.
void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
748 
// Reads stream 0 of |disk_entry| in full and parses it as a serialized
// HttpResponseInfo. Returns whether parsing succeeded; also reports whether
// the stored response was flagged as truncated.
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  auto buffer = base::MakeRefCounted<IOBufferWithSize>(size);
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->span(), response_info,
                                      response_truncated);
}
763 
// Serializes |response_info| and writes it to stream 0 of |disk_entry|
// (truncating). Returns true when the full pickle was written.
bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  base::Pickle pickle;
  response_info->Persist(&pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  int len = static_cast<int>(pickle.size());
  // WrappedIOBuffer borrows the pickle's bytes; |pickle| outlives the write.
  auto data = base::MakeRefCounted<WrappedIOBuffer>(pickle);

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}
779 
OpenBackendEntry(const std::string & key,disk_cache::Entry ** entry)780 bool MockHttpCache::OpenBackendEntry(const std::string& key,
781                                      disk_cache::Entry** entry) {
782   TestEntryResultCompletionCallback cb;
783   disk_cache::EntryResult result =
784       backend()->OpenEntry(key, HIGHEST, cb.callback());
785   result = cb.GetResult(std::move(result));
786   if (result.net_error() == OK) {
787     *entry = result.ReleaseEntry();
788     return true;
789   } else {
790     return false;
791   }
792 }
793 
CreateBackendEntry(const std::string & key,disk_cache::Entry ** entry,NetLog * net_log)794 bool MockHttpCache::CreateBackendEntry(const std::string& key,
795                                        disk_cache::Entry** entry,
796                                        NetLog* net_log) {
797   TestEntryResultCompletionCallback cb;
798   disk_cache::EntryResult result =
799       backend()->CreateEntry(key, HIGHEST, cb.callback());
800   result = cb.GetResult(std::move(result));
801   if (result.net_error() == OK) {
802     *entry = result.ReleaseEntry();
803     return true;
804   } else {
805     return false;
806   }
807 }
808 
809 // Static.
GetTestMode(int test_mode)810 int MockHttpCache::GetTestMode(int test_mode) {
811   if (!g_test_mode) {
812     return test_mode;
813   }
814 
815   return g_test_mode;
816 }
817 
// Static.
// Sets the global test-mode override; 0 restores per-entry modes.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}
822 
// True when the active entry for |key| has at least one writer transaction.
bool MockHttpCache::IsWriterPresent(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry && entry->HasWriters() && !entry->writers()->IsEmpty();
}
827 
// True when a transaction is validating headers on |key|'s active entry.
bool MockHttpCache::IsHeadersTransactionPresent(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry && entry->headers_transaction();
}
832 
// Number of reader transactions on |key|'s active entry (0 if none).
int MockHttpCache::GetCountReaders(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry ? entry->readers().size() : 0;
}
837 
// Number of transactions queued to join |key|'s active entry (0 if none).
int MockHttpCache::GetCountAddToEntryQueue(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry ? entry->add_to_entry_queue().size() : 0;
}
842 
// Number of transactions in |key|'s done-headers queue (0 if none).
int MockHttpCache::GetCountDoneHeadersQueue(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry ? entry->done_headers_queue().size() : 0;
}
847 
// Number of writer transactions on |key|'s active entry (0 if none).
int MockHttpCache::GetCountWriterTransactions(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry && entry->writers() ? entry->writers()->GetTransactionsCount()
                                   : 0;
}
853 
// Weak pointer to the underlying HttpCache, for lifetime-sensitive tests.
base::WeakPtr<HttpCache> MockHttpCache::GetWeakPtr() {
  return http_cache_.GetWeakPtr();
}
857 
858 //-----------------------------------------------------------------------------
859 
// Variant backend whose entry creation hangs forever: reports pending and
// never invokes |callback|.
disk_cache::EntryResult MockDiskCacheNoCB::CreateEntry(
    const std::string& key,
    RequestPriority request_priority,
    EntryResultCallback callback) {
  return EntryResult::MakeError(ERR_IO_PENDING);
}
866 
867 //-----------------------------------------------------------------------------
868 
// Synchronously produces a MockDiskCacheNoCB; |callback| is never used.
disk_cache::BackendResult MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  return disk_cache::BackendResult::Make(std::make_unique<MockDiskCacheNoCB>());
}
874 
875 //-----------------------------------------------------------------------------
876 
MockBlockingBackendFactory::MockBlockingBackendFactory() = default;
MockBlockingBackendFactory::~MockBlockingBackendFactory() = default;
879 
// Completes immediately unless blocking is enabled, in which case the
// callback is held until FinishCreation() is called.
disk_cache::BackendResult MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  if (!block_) {
    return MakeResult();
  }

  callback_ = std::move(callback);
  return disk_cache::BackendResult::MakeError(ERR_IO_PENDING);
}
890 
// Unblocks backend creation and delivers the result to any waiting caller.
void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    // Running the callback might delete |this|.
    std::move(callback_).Run(MakeResult());
  }
}
898 
MakeResult()899 disk_cache::BackendResult MockBlockingBackendFactory::MakeResult() {
900   if (fail_) {
901     return disk_cache::BackendResult::MakeError(ERR_FAILED);
902   } else {
903     return disk_cache::BackendResult::Make(std::make_unique<MockDiskCache>());
904   }
905 }
906 
907 }  // namespace net
908