1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/http/mock_http_cache.h"
6
7 #include <algorithm>
8 #include <limits>
9 #include <memory>
10 #include <utility>
11
12 #include "base/feature_list.h"
13 #include "base/functional/bind.h"
14 #include "base/functional/callback.h"
15 #include "base/functional/callback_helpers.h"
16 #include "base/location.h"
17 #include "base/task/single_thread_task_runner.h"
18 #include "net/base/features.h"
19 #include "net/base/net_errors.h"
20 #include "net/disk_cache/disk_cache_test_util.h"
21 #include "net/http/http_cache_writers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
23
24 namespace net {
25
26 namespace {
27
28 // During testing, we are going to limit the size of a cache entry to this many
29 // bytes using DCHECKs in order to prevent a test from causing unbounded memory
30 // growth. In practice cache entry shouldn't come anywhere near this limit for
31 // tests that use the mock cache. If they do, that's likely a problem with the
32 // test. If a test requires using massive cache entries, they should use a real
33 // cache backend instead.
34 const int kMaxMockCacheEntrySize = 100 * 1000 * 1000;
35
36 // We can override the test mode for a given operation by setting this global
37 // variable.
38 int g_test_mode = 0;
39
GetTestModeForEntry(const std::string & key)40 int GetTestModeForEntry(const std::string& key) {
41 GURL url(HttpCache::GetResourceURLFromHttpCacheKey(key));
42 const MockTransaction* t = FindMockTransaction(url);
43 DCHECK(t);
44 return t->test_mode;
45 }
46
47 } // namespace
48
49 //-----------------------------------------------------------------------------
50
// Bundles a deferred callback with a reference to the entry that produced it,
// keeping the entry alive until the callback is finally delivered (see
// StoreAndDeliverCallbacks).
struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  base::OnceClosure callback;
};

// Creates an entry for |key| with no file-size limit. The entry's test-mode
// flags come from the mock transaction registered for the key's URL.
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), max_file_size_(std::numeric_limits<int>::max()) {
  test_mode_ = GetTestModeForEntry(key);
}
60
// Marks the entry as doomed; MockDiskCache purges doomed entries lazily when
// they are next looked up.
void MockDiskEntry::Doom() {
  doomed_ = true;
}

// Entries are reference counted; Close() simply drops the caller's reference.
void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

// The mock does not track usage times; both time accessors report "now".
base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::Now();
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::Now();
}

// Returns the current size of data stream |index|.
int32_t MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32_t>(data_[index].size());
}
85
// Reads up to |buf_len| bytes of stream |index| starting at |offset|.
// Depending on the configured knobs this may fail synchronously, complete
// synchronously (TEST_MODE_SYNC_CACHE_READ), pause until
// ResumeDiskEntryOperation() is called (DEFER_READ), or complete through a
// posted callback (the default async path).
int MockDiskEntry::ReadData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_ & FAIL_READ)
    return ERR_CACHE_READ_FAILURE;

  // Out-of-range offsets fail; reading exactly at end-of-stream yields 0.
  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;

  // Copy as much as is available, capped at |buf_len|.
  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  // Pause and resume.
  if (defer_op_ == DEFER_READ) {
    defer_op_ = DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = num;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), num);
  return ERR_IO_PENDING;
}
119
// Delivers the callback that a deferred (DEFER_READ/DEFER_WRITE) operation
// stashed in |resume_callback_|, with the result it computed at defer time.
void MockDiskEntry::ResumeDiskEntryOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_), resume_return_code_);
  resume_return_code_ = 0;
}
125
// Writes |buf_len| bytes to stream |index| at |offset|. The mock only
// supports truncating writes (DCHECK(truncate)); the stream is resized to
// exactly |offset + buf_len|. Same sync/defer/async completion modes as
// ReadData.
int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  // Note: write failures are reported asynchronously and deliberately reuse
  // ERR_CACHE_READ_FAILURE as the error code.
  if (fail_requests_ & FAIL_WRITE) {
    CallbackLater(std::move(callback), ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;

  // Guard against tests growing unbounded entries; see kMaxMockCacheEntrySize.
  DCHECK_LT(offset + buf_len, kMaxMockCacheEntrySize);
  // Only the body stream (index 1) enforces the configured max file size.
  if (offset + buf_len > max_file_size_ && index == 1)
    return net::ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  // Pause and resume.
  if (defer_op_ == DEFER_WRITE) {
    defer_op_ = DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = buf_len;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}
165
// Reads sparse data, which this mock stores flat in stream 1. Fails while a
// previous async sparse operation is still in flight (|busy_|) or after
// CancelSparseIO().
int MockDiskEntry::ReadSparseData(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_ & FAIL_READ_SPARSE)
    return ERR_CACHE_READ_FAILURE;

  // Offsets are stored in an int-indexed vector, so they must fit in 32 bits.
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  // NOTE(review): if |real_offset| is past the end of stream 1, |num| goes
  // negative here — assumed not to happen for well-behaved test callers.
  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(std::move(callback), num);
  // Setting |busy_| makes RunCallback() re-post the completion once, so async
  // sparse reads take two trips through the message loop (see RunCallback).
  busy_ = true;
  delayed_ = false;
  return ERR_IO_PENDING;
}
198
// Writes sparse data into stream 1, growing it as needed. The first sparse
// write flips the entry into sparse mode, but only if stream 1 is still
// empty; mixing regular and sparse data in stream 1 is not supported.
int MockDiskEntry::WriteSparseData(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return ERR_FAILED;
  if (!buf_len)
    return 0;

  // Note: sparse write failures deliberately reuse ERR_CACHE_READ_FAILURE.
  if (fail_requests_ & FAIL_WRITE_SPARSE)
    return ERR_CACHE_READ_FAILURE;

  // Offsets are stored in an int-indexed vector, so they must fit in 32 bits.
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len) {
    // Guard against tests growing unbounded entries.
    DCHECK_LT(real_offset + buf_len, kMaxMockCacheEntrySize);
    data_[1].resize(real_offset + buf_len);
  }

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}
236
// Reports the first contiguous run of nonzero bytes in stream 1 within
// [offset, offset + len). Zero bytes are treated as "holes", matching how
// this mock stores sparse data flat in the stream.
disk_cache::RangeResult MockDiskEntry::GetAvailableRange(
    int64_t offset,
    int len,
    RangeResultCallback callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_ || cancel_)
    return RangeResult(ERR_CACHE_OPERATION_NOT_SUPPORTED);
  if (offset < 0)
    return RangeResult(ERR_FAILED);

  if (fail_requests_ & FAIL_GET_AVAILABLE_RANGE)
    return RangeResult(ERR_CACHE_READ_FAILURE);

  RangeResult result;
  result.net_error = OK;
  result.start = offset;
  result.available_len = 0;
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  // Nothing stored at or past |offset|: report an empty range.
  if (static_cast<int>(data_[1].size()) < real_offset)
    return result;

  // Scan for the first nonzero byte, then extend the run until a zero byte
  // (or the end of the scanned window) is hit.
  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  for (; num > 0; num--, real_offset++) {
    if (!result.available_len) {
      if (data_[1][real_offset]) {
        result.available_len++;
        result.start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      result.available_len++;
    }
  }
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) {
    return result;
  }

  CallbackLater(base::BindOnce(std::move(callback), result));
  return RangeResult(ERR_IO_PENDING);
}
279
CouldBeSparse() const280 bool MockDiskEntry::CouldBeSparse() const {
281 if (fail_sparse_requests_)
282 return false;
283 return sparse_;
284 }
285
// Flags in-flight sparse IO as cancelled; sparse operations fail with
// ERR_CACHE_OPERATION_NOT_SUPPORTED until ReadyForSparseIO() clears the flag.
void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}

// Completes a pending cancellation. Returns OK immediately when nothing was
// cancelled (or in sync test mode); otherwise notifies asynchronously.
net::Error MockDiskEntry::ReadyForSparseIO(CompletionOnceCallback callback) {
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass). Just notify the caller that it finished.
  CallbackLater(std::move(callback), 0);
  return ERR_IO_PENDING;
}
306
// The mock does not track usage times (see GetLastUsed), so overriding them
// is unsupported.
void MockDiskEntry::SetLastUsedTimeForTest(base::Time time) {
  NOTREACHED();
}

// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false. Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  // Re-enabling delivers every callback stored while callbacks were ignored.
  if (!value)
    StoreAndDeliverCallbacks(false, nullptr, base::OnceClosure());
}
322
MockDiskEntry::~MockDiskEntry() = default;

// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry. We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(base::OnceClosure callback) {
  // While callbacks are globally suppressed, park the callback instead of
  // posting it (see IgnoreCallbacks).
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, std::move(callback));
  // Binding |this| retains a reference until RunCallback executes.
  base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE,
      base::BindOnce(&MockDiskEntry::RunCallback, this, std::move(callback)));
}

// Convenience overload: posts a completion callback bound to |result|.
void MockDiskEntry::CallbackLater(CompletionOnceCallback callback, int result) {
  CallbackLater(base::BindOnce(std::move(callback), result));
}
339
// Runs a posted callback, except that the first delivery attempt of an async
// sparse operation (|busy_| set, not yet |delayed_|) is re-posted once.
void MockDiskEntry::RunCallback(base::OnceClosure callback) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated. What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots). So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(std::move(callback));
    }
  }
  busy_ = false;
  std::move(callback).Run();
}
358
// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                             MockDiskEntry* entry,
                                             base::OnceClosure callback) {
  // Function-local static: the stored callbacks survive across calls and
  // across entries. Each CallbackInfo holds a reference to its entry.
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, std::move(callback)};
    callback_list.push_back(std::move(c));
  } else {
    // |entry| and |callback| are ignored in delivery mode.
    for (auto& callback_info : callback_list) {
      callback_info.entry->CallbackLater(std::move(callback_info.callback));
    }
    callback_list.clear();
  }
}

// Statics.
bool MockDiskEntry::ignore_callbacks_ = false;
379
380 //-----------------------------------------------------------------------------
381
// A mock backend with no file-size limit by default.
MockDiskCache::MockDiskCache()
    : Backend(DISK_CACHE), max_file_size_(std::numeric_limits<int>::max()) {}

MockDiskCache::~MockDiskCache() {
  // Drop the backend's reference to every entry (see ReleaseAll).
  ReleaseAll();
}

int32_t MockDiskCache::GetEntryCount() const {
  return static_cast<int32_t>(entries_.size());
}
392
// Opens |key| if it exists, otherwise creates it. Implemented on top of
// OpenEntry/CreateEntry, so it honors all of the same failure knobs.
disk_cache::EntryResult MockDiskCache::OpenOrCreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());

  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        std::move(callback),
        EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_)
    return EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE);

  EntryResult result;

  // First try opening the entry.
  // |callback| may be needed by either the open or the create attempt (only
  // one will ever run it), hence the split.
  auto split_callback = base::SplitOnceCallback(std::move(callback));
  result = OpenEntry(key, request_priority, std::move(split_callback.first));
  if (result.net_error() == OK || result.net_error() == ERR_IO_PENDING)
    return result;

  // Unable to open, try creating the entry.
  result = CreateEntry(key, request_priority, std::move(split_callback.second));
  if (result.net_error() == OK || result.net_error() == ERR_IO_PENDING)
    return result;

  return EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE);
}
424
// Opens an existing entry. Doomed entries are purged lazily here and report
// as missing. Completion is synchronous only when the key's test mode has
// TEST_MODE_SYNC_CACHE_START; otherwise the result arrives via |callback|.
disk_cache::EntryResult MockDiskCache::OpenEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        std::move(callback), EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_)
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);

  auto it = entries_.find(key);
  if (it == entries_.end())
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);

  // Lazily purge a doomed entry and treat it as absent.
  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);
  }

  open_count_++;

  // The returned EntryResult carries its own reference to the entry.
  MockDiskEntry* entry = it->second;
  entry->AddRef();

  // Soft failures configured on the cache are transferred to the entry;
  // one-instance failures apply only to this single open.
  if (soft_failures_ || soft_failures_one_instance_) {
    entry->set_fail_requests(soft_failures_ | soft_failures_one_instance_);
    soft_failures_one_instance_ = 0;
  }

  entry->set_max_file_size(max_file_size_);

  EntryResult result = EntryResult::MakeOpened(entry);
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return result;

  CallbackLater(base::BindOnce(std::move(callback), std::move(result)));
  return EntryResult::MakeError(ERR_IO_PENDING);
}
468
// Creates a new entry for |key|. Creating over a live (non-doomed) entry is
// a NOTREACHED by default, or a plain failure when double_create_check_ is
// off. Supports deferred completion via DEFER_CREATE.
disk_cache::EntryResult MockDiskCache::CreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        std::move(callback), EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_)
    return EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE);

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE);
    }
    // Replace a doomed entry: drop the map's reference first.
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  // First reference: owned by the |entries_| map (released in ReleaseAll or
  // when the entry is purged).
  new_entry->AddRef();
  entries_[key] = new_entry;

  // Second reference: owned by the EntryResult handed back to the caller.
  new_entry->AddRef();

  // Soft failures configured on the cache are transferred to the entry;
  // one-instance failures apply only to this single create.
  if (soft_failures_ || soft_failures_one_instance_) {
    new_entry->set_fail_requests(soft_failures_ | soft_failures_one_instance_);
    soft_failures_one_instance_ = 0;
  }

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  new_entry->set_max_file_size(max_file_size_);

  EntryResult result = EntryResult::MakeCreated(new_entry);
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return result;

  // Pause and resume.
  if (defer_op_ == MockDiskEntry::DEFER_CREATE) {
    defer_op_ = MockDiskEntry::DEFER_NONE;
    resume_callback_ = base::BindOnce(std::move(callback), std::move(result));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  CallbackLater(base::BindOnce(std::move(callback), std::move(result)));
  return EntryResult::MakeError(ERR_IO_PENDING);
}
528
// Removes |key| from the cache index. Dooming a key that is not present is
// still a success (OK), matching the permissive contract of the mock.
net::Error MockDiskCache::DoomEntry(const std::string& key,
                                    net::RequestPriority request_priority,
                                    CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(std::move(callback), ERR_CACHE_DOOM_FAILURE));
    return ERR_IO_PENDING;
  }

  if (fail_requests_)
    return ERR_CACHE_DOOM_FAILURE;

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    // Drop the map's reference; outstanding handles keep the entry alive.
    it->second->Release();
    entries_.erase(it);
    doomed_count_++;
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(base::BindOnce(std::move(callback), OK));
  return ERR_IO_PENDING;
}
554
// The bulk-doom and size-calculation operations are not needed by the tests
// that use this mock, so they are deliberately unimplemented.
net::Error MockDiskCache::DoomAllEntries(CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}

net::Error MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                             const base::Time end_time,
                                             CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}

net::Error MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                           CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}

int64_t MockDiskCache::CalculateSizeOfAllEntries(
    Int64CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
574
// Iteration over mock entries is unsupported; every OpenNextEntry fails.
class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  EntryResult OpenNextEntry(EntryResultCallback callback) override {
    return EntryResult::MakeError(ERR_NOT_IMPLEMENTED);
  }
};

std::unique_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return std::make_unique<NotImplementedIterator>();
}
585
// The mock exposes no statistics.
void MockDiskCache::GetStats(base::StringPairs* stats) {
}

// Records the key so tests can verify which external cache hits were
// reported (see GetExternalCacheHits).
void MockDiskCache::OnExternalCacheHit(const std::string& key) {
  external_cache_hits_.push_back(key);
}
592
GetEntryInMemoryData(const std::string & key)593 uint8_t MockDiskCache::GetEntryInMemoryData(const std::string& key) {
594 if (!support_in_memory_entry_data_)
595 return 0;
596
597 auto it = entries_.find(key);
598 if (it != entries_.end())
599 return it->second->in_memory_data();
600 return 0;
601 }
602
// Sets the in-memory hint byte on |key|'s entry; no-op if the key is absent.
void MockDiskCache::SetEntryInMemoryData(const std::string& key, uint8_t data) {
  auto it = entries_.find(key);
  if (it != entries_.end())
    it->second->set_in_memory_data(data);
}

int64_t MockDiskCache::MaxFileSize() const {
  return max_file_size_;
}
612
ReleaseAll()613 void MockDiskCache::ReleaseAll() {
614 for (auto entry : entries_)
615 entry.second->Release();
616 entries_.clear();
617 }
618
// Posts |callback| to run on the current thread's task runner.
void MockDiskCache::CallbackLater(base::OnceClosure callback) {
  base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE, std::move(callback));
}
623
IsDiskEntryDoomed(const std::string & key)624 bool MockDiskCache::IsDiskEntryDoomed(const std::string& key) {
625 auto it = entries_.find(key);
626 if (it != entries_.end())
627 return it->second->is_doomed();
628
629 return false;
630 }
631
// Delivers the callback parked by a DEFER_CREATE operation.
void MockDiskCache::ResumeCacheOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_));
}

// Returns a strong reference to |key|'s entry, or null if absent.
scoped_refptr<MockDiskEntry> MockDiskCache::GetDiskEntryRef(
    const std::string& key) {
  auto it = entries_.find(key);
  if (it == entries_.end())
    return nullptr;
  return it->second;
}

// Keys recorded by OnExternalCacheHit, in call order.
const std::vector<std::string>& MockDiskCache::GetExternalCacheHits() const {
  return external_cache_hits_;
}
648
649 //-----------------------------------------------------------------------------
650
// Synchronously produces a fresh MockDiskCache; |callback| is never used.
disk_cache::BackendResult MockBackendFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  return disk_cache::BackendResult::Make(std::make_unique<MockDiskCache>());
}
656
657 //-----------------------------------------------------------------------------
658
// Default-constructs with a MockBackendFactory (i.e. a MockDiskCache).
MockHttpCache::MockHttpCache()
    : MockHttpCache(std::make_unique<MockBackendFactory>()) {}

// Wires a real HttpCache to a mock network layer and the given backend
// factory.
MockHttpCache::MockHttpCache(
    std::unique_ptr<HttpCache::BackendFactory> disk_cache_factory)
    : http_cache_(std::make_unique<MockNetworkLayer>(),
                  std::move(disk_cache_factory)) {}
666
// Waits for the cache backend to be created (pumping the message loop via
// TestCompletionCallback) and returns it, or nullptr on failure.
disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : nullptr;
}

// Downcast convenience accessor; valid because this cache is always built on
// a MockDiskCache-producing factory in these tests.
MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}

// Creates a cache transaction at default priority.
int MockHttpCache::CreateTransaction(std::unique_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}
682
// Thin forwards to HttpCache's test-only hooks.
void MockHttpCache::SimulateCacheLockTimeout() {
  http_cache_.SimulateCacheLockTimeoutForTesting();
}

void MockHttpCache::SimulateCacheLockTimeoutAfterHeaders() {
  http_cache_.SimulateCacheLockTimeoutAfterHeadersForTesting();
}

void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
694
// Reads and parses the serialized HttpResponseInfo stored in stream 0 of
// |disk_entry|. Blocks until the read completes. Returns the parse result;
// the read size is additionally checked with a gtest expectation.
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  scoped_refptr<IOBuffer> buffer = base::MakeRefCounted<IOBuffer>(size);
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}
709
// Serializes |response_info| into a pickle and writes it to stream 0 of
// |disk_entry|, blocking until the write completes. Returns true iff the
// full pickle was written.
bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  base::Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  // WrappedIOBuffer does not own the bytes; |pickle| must outlive the write,
  // which it does because GetResult() below waits for completion.
  scoped_refptr<WrappedIOBuffer> data = base::MakeRefCounted<WrappedIOBuffer>(
      reinterpret_cast<const char*>(pickle.data()));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}
727
OpenBackendEntry(const std::string & key,disk_cache::Entry ** entry)728 bool MockHttpCache::OpenBackendEntry(const std::string& key,
729 disk_cache::Entry** entry) {
730 TestEntryResultCompletionCallback cb;
731 disk_cache::EntryResult result =
732 backend()->OpenEntry(key, net::HIGHEST, cb.callback());
733 result = cb.GetResult(std::move(result));
734 if (result.net_error() == OK) {
735 *entry = result.ReleaseEntry();
736 return true;
737 } else {
738 return false;
739 }
740 }
741
CreateBackendEntry(const std::string & key,disk_cache::Entry ** entry,NetLog * net_log)742 bool MockHttpCache::CreateBackendEntry(const std::string& key,
743 disk_cache::Entry** entry,
744 NetLog* net_log) {
745 TestEntryResultCompletionCallback cb;
746 disk_cache::EntryResult result =
747 backend()->CreateEntry(key, net::HIGHEST, cb.callback());
748 result = cb.GetResult(std::move(result));
749 if (result.net_error() == OK) {
750 *entry = result.ReleaseEntry();
751 return true;
752 } else {
753 return false;
754 }
755 }
756
757 // Static.
GetTestMode(int test_mode)758 int MockHttpCache::GetTestMode(int test_mode) {
759 if (!g_test_mode)
760 return test_mode;
761
762 return g_test_mode;
763 }
764
// Static.
// Sets the global test-mode override consulted by GetTestMode; 0 disables it.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}
769
// The helpers below peek into HttpCache's active-entry bookkeeping so tests
// can assert on writer/reader/queue state for a given cache key. All of them
// treat a missing active entry as empty/false.

bool MockHttpCache::IsWriterPresent(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->writers && !entry->writers->IsEmpty();
}

bool MockHttpCache::IsHeadersTransactionPresent(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->headers_transaction;
}

int MockHttpCache::GetCountReaders(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->readers.size() : 0;
}

int MockHttpCache::GetCountAddToEntryQueue(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->add_to_entry_queue.size() : 0;
}

int MockHttpCache::GetCountDoneHeadersQueue(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->done_headers_queue.size() : 0;
}

int MockHttpCache::GetCountWriterTransactions(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->writers ? entry->writers->GetTransactionsCount() : 0;
}
799
800 //-----------------------------------------------------------------------------
801
// A backend whose entry creation hangs forever: it reports ERR_IO_PENDING
// but never runs |callback|.
disk_cache::EntryResult MockDiskCacheNoCB::CreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  return EntryResult::MakeError(ERR_IO_PENDING);
}
808
809 //-----------------------------------------------------------------------------
810
// Synchronously produces a MockDiskCacheNoCB backend; |callback| is unused.
disk_cache::BackendResult MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  return disk_cache::BackendResult::Make(std::make_unique<MockDiskCacheNoCB>());
}
816
817 //-----------------------------------------------------------------------------
818
MockBlockingBackendFactory::MockBlockingBackendFactory() = default;
MockBlockingBackendFactory::~MockBlockingBackendFactory() = default;

// While |block_| is set, stashes |callback| and reports ERR_IO_PENDING so
// tests can control when backend creation completes (see FinishCreation).
// Otherwise completes synchronously via MakeResult().
disk_cache::BackendResult MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  if (!block_) {
    return MakeResult();
  }

  callback_ = std::move(callback);
  return disk_cache::BackendResult::MakeError(ERR_IO_PENDING);
}
832
// Unblocks creation and, if a CreateBackend call is waiting, completes it.
void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    // Running the callback might delete |this|.
    std::move(callback_).Run(MakeResult());
  }
}
840
MakeResult()841 disk_cache::BackendResult MockBlockingBackendFactory::MakeResult() {
842 if (fail_)
843 return disk_cache::BackendResult::MakeError(ERR_FAILED);
844 else
845 return disk_cache::BackendResult::Make(std::make_unique<MockDiskCache>());
846 }
847
848 } // namespace net
849