• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2013 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "net/disk_cache/simple/simple_backend_impl.h"
6 
7 #include <algorithm>
8 #include <cstdlib>
9 #include <functional>
10 #include <limits>
11 
12 #include "base/functional/callback_helpers.h"
13 #include "base/task/sequenced_task_runner.h"
14 #include "base/task/thread_pool.h"
15 #include "build/build_config.h"
16 
17 #if BUILDFLAG(IS_POSIX)
18 #include <sys/resource.h>
19 #endif
20 
21 #include "base/files/file_util.h"
22 #include "base/functional/bind.h"
23 #include "base/functional/callback.h"
24 #include "base/lazy_instance.h"
25 #include "base/location.h"
26 #include "base/memory/ptr_util.h"
27 #include "base/metrics/field_trial.h"
28 #include "base/metrics/field_trial_params.h"
29 #include "base/metrics/histogram_functions.h"
30 #include "base/metrics/histogram_macros.h"
31 #include "base/system/sys_info.h"
32 #include "base/task/thread_pool/thread_pool_instance.h"
33 #include "base/time/time.h"
34 #include "build/build_config.h"
35 #include "net/base/net_errors.h"
36 #include "net/base/prioritized_task_runner.h"
37 #include "net/disk_cache/backend_cleanup_tracker.h"
38 #include "net/disk_cache/cache_util.h"
39 #include "net/disk_cache/simple/simple_entry_format.h"
40 #include "net/disk_cache/simple/simple_entry_impl.h"
41 #include "net/disk_cache/simple/simple_file_tracker.h"
42 #include "net/disk_cache/simple/simple_histogram_macros.h"
43 #include "net/disk_cache/simple/simple_index.h"
44 #include "net/disk_cache/simple/simple_index_file.h"
45 #include "net/disk_cache/simple/simple_synchronous_entry.h"
46 #include "net/disk_cache/simple/simple_util.h"
47 #include "net/disk_cache/simple/simple_version_upgrade.h"
48 
49 using base::FilePath;
50 using base::Time;
51 
52 namespace disk_cache {
53 
54 namespace {
55 
56 // Maximum fraction of the cache that one entry can consume.
57 const int kMaxFileRatio = 8;
58 
59 // Native code entries can be large. Rather than increasing the overall cache
60 // size, allow an individual entry to occupy up to half of the cache.
61 const int kMaxNativeCodeFileRatio = 2;
62 
63 // Overrides the above.
64 const int64_t kMinFileSizeLimit = 5 * 1024 * 1024;
65 
66 // Global context of all the files we have open --- this permits some to be
67 // closed on demand if too many FDs are being used, to avoid running out.
68 base::LazyInstance<SimpleFileTracker>::Leaky g_simple_file_tracker =
69     LAZY_INSTANCE_INITIALIZER;
70 
// Detects if the files in the cache directory match the current disk cache
// backend type and version. If the directory contains no cache, occupies it
// with the fresh structure.
SimpleCacheConsistencyResult FileStructureConsistent(
    BackendFileOperations* file_operations,
    const base::FilePath& path) {
  // Ensure the directory exists before probing its contents; failure to
  // create it is reported as its own distinct consistency result.
  if (!file_operations->PathExists(path) &&
      !file_operations->CreateDirectory(path)) {
    LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
    return SimpleCacheConsistencyResult::kCreateDirectoryFailed;
  }
  // Version/format checks (and any in-place upgrade) are delegated to the
  // simple-cache version-upgrade logic.
  return disk_cache::UpgradeSimpleCacheOnDisk(file_operations, path);
}
84 
// A context used by a BarrierCompletionCallback to track state.
struct BarrierContext {
  explicit BarrierContext(net::CompletionOnceCallback final_callback,
                          int expected)
      : final_callback_(std::move(final_callback)), expected(expected) {}

  // Run exactly once: on the first error, or after |expected| successes.
  net::CompletionOnceCallback final_callback_;
  // Number of successful results required before |final_callback_| fires.
  const int expected;
  // Successful results seen so far.
  int count = 0;
  // Set after the first error; all subsequent results are ignored.
  bool had_error = false;
};
96 
// Folds one |result| into |context|: runs the final callback immediately on
// the first error, or with net::OK once all expected successes have arrived.
void BarrierCompletionCallbackImpl(
    BarrierContext* context,
    int result) {
  DCHECK_GT(context->expected, context->count);
  // After an error the final callback has already run; drop further results.
  if (context->had_error)
    return;
  if (result != net::OK) {
    context->had_error = true;
    std::move(context->final_callback_).Run(result);
    return;
  }
  ++context->count;
  if (context->count == context->expected)
    std::move(context->final_callback_).Run(net::OK);
}
112 
// A barrier completion callback is a repeatable callback that waits for
// |count| successful results before invoking |final_callback|. In the case of
// an error, the first error is passed to |final_callback| and all others
// are ignored.
base::RepeatingCallback<void(int)> MakeBarrierCompletionCallback(
    int count,
    net::CompletionOnceCallback final_callback) {
  BarrierContext* context =
      new BarrierContext(std::move(final_callback), count);
  // base::Owned() transfers ownership of |context| to the returned callback,
  // so the context is destroyed together with the last callback copy.
  return base::BindRepeating(&BarrierCompletionCallbackImpl,
                             base::Owned(context));
}
125 
// A short bindable thunk that ensures a completion callback is always called
// after running an operation asynchronously. Checks for backend liveness first.
void RunOperationAndCallback(
    base::WeakPtr<SimpleBackendImpl> backend,
    base::OnceCallback<net::Error(net::CompletionOnceCallback)> operation,
    net::CompletionOnceCallback operation_callback) {
  // Backend already destroyed: the queued operation is silently dropped.
  if (!backend)
    return;

  // The callback is split because exactly one of two paths consumes it:
  // either |operation| completes asynchronously (first half), or we must run
  // it here with the synchronous result (second half).
  auto split_callback = base::SplitOnceCallback(std::move(operation_callback));
  const int operation_result =
      std::move(operation).Run(std::move(split_callback.first));
  if (operation_result != net::ERR_IO_PENDING && split_callback.second)
    std::move(split_callback.second).Run(operation_result);
}
141 
// Same but for things that work with EntryResult.
void RunEntryResultOperationAndCallback(
    base::WeakPtr<SimpleBackendImpl> backend,
    base::OnceCallback<EntryResult(EntryResultCallback)> operation,
    EntryResultCallback operation_callback) {
  // Backend already destroyed: the queued operation is silently dropped.
  if (!backend)
    return;

  // Split so either the async completion (first half) or the synchronous
  // path below (second half) delivers the result — never both.
  auto split_callback = base::SplitOnceCallback(std::move(operation_callback));
  EntryResult operation_result =
      std::move(operation).Run(std::move(split_callback.first));
  if (operation_result.net_error() != net::ERR_IO_PENDING &&
      split_callback.second) {
    std::move(split_callback.second).Run(std::move(operation_result));
  }
}
158 
// Records (via UMA) how long after backend construction the index finished
// loading, in a success or failure histogram depending on |result|.
void RecordIndexLoad(net::CacheType cache_type,
                     base::TimeTicks constructed_since,
                     int result) {
  const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
                                            constructed_since;
  if (result == net::OK) {
    SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
  } else {
    SIMPLE_CACHE_UMA(TIMES,
                     "CreationToIndexFail", cache_type, creation_to_index);
  }
}
171 
CacheTypeToOperationsMode(net::CacheType type)172 SimpleEntryImpl::OperationsMode CacheTypeToOperationsMode(net::CacheType type) {
173   return (type == net::DISK_CACHE || type == net::GENERATED_BYTE_CODE_CACHE ||
174           type == net::GENERATED_NATIVE_CODE_CACHE ||
175           type == net::GENERATED_WEBUI_BYTE_CODE_CACHE)
176              ? SimpleEntryImpl::OPTIMISTIC_OPERATIONS
177              : SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS;
178 }
179 
180 }  // namespace
181 
// RAII token held by a SimpleEntryImpl: while alive, the entry is registered
// in the backend's |active_entries_| map; its destructor unregisters it.
class SimpleBackendImpl::ActiveEntryProxy
    : public SimpleEntryImpl::ActiveEntryProxy {
 public:
  ~ActiveEntryProxy() override {
    // The backend may already be gone; entries can outlive it.
    if (backend_) {
      DCHECK_EQ(1U, backend_->active_entries_.count(entry_hash_));
      backend_->active_entries_.erase(entry_hash_);
    }
  }

  // NOTE(review): the parameter here is int64_t but it is stored (and taken
  // by the ctor) as uint64_t — looks like an unintended sign mismatch;
  // confirm against callers before changing.
  static std::unique_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
      int64_t entry_hash,
      SimpleBackendImpl* backend) {
    return base::WrapUnique(new ActiveEntryProxy(entry_hash, backend));
  }

 private:
  ActiveEntryProxy(uint64_t entry_hash, SimpleBackendImpl* backend)
      : entry_hash_(entry_hash), backend_(backend->AsWeakPtr()) {}

  // Hash key of the entry this proxy guards.
  uint64_t entry_hash_;
  // Weak: the backend may be destroyed before all outstanding entries.
  base::WeakPtr<SimpleBackendImpl> backend_;
};
205 
// Constructs the backend. No disk I/O happens here; Init() must be called
// to load the index and create the on-disk structure.
SimpleBackendImpl::SimpleBackendImpl(
    scoped_refptr<BackendFileOperationsFactory> file_operations_factory,
    const FilePath& path,
    scoped_refptr<BackendCleanupTracker> cleanup_tracker,
    SimpleFileTracker* file_tracker,
    int64_t max_bytes,
    net::CacheType cache_type,
    net::NetLog* net_log)
    : Backend(cache_type),
      // Fall back to a trivial factory if the caller supplies none.
      file_operations_factory_(
          file_operations_factory
              ? std::move(file_operations_factory)
              : base::MakeRefCounted<TrivialFileOperationsFactory>()),
      cleanup_tracker_(std::move(cleanup_tracker)),
      // Default to the process-wide file tracker unless one was injected
      // (e.g. by tests).
      file_tracker_(file_tracker ? file_tracker
                                 : g_simple_file_tracker.Pointer()),
      path_(path),
      orig_max_size_(max_bytes),
      entry_operations_mode_(CacheTypeToOperationsMode(cache_type)),
      post_doom_waiting_(
          base::MakeRefCounted<SimplePostDoomWaiterTable>(cache_type)),
      net_log_(net_log) {
  // Treat negative passed-in sizes same as SetMaxSize would here and in other
  // backends, as default (if first call).
  if (orig_max_size_ < 0)
    orig_max_size_ = 0;
}
233 
SimpleBackendImpl::~SimpleBackendImpl() {
  // Write the index out if there is a pending write from a
  // previous operation.
  // NOTE(review): |index_| is dereferenced unconditionally — assumes Init()
  // always runs before destruction; confirm all construction paths do so.
  if (index_->HasPendingWrite())
    index_->WriteToDisk(SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN);
}
240 
// Test hook: replaces the prioritized worker-pool runner with one backed by
// the given |task_runner| so tests can control task execution.
void SimpleBackendImpl::SetTaskRunnerForTesting(
    scoped_refptr<base::SequencedTaskRunner> task_runner) {
  prioritized_task_runner_ =
      base::MakeRefCounted<net::PrioritizedTaskRunner>(kWorkerPoolTaskTraits);
  prioritized_task_runner_->SetTaskRunnerForTesting(  // IN-TEST
      std::move(task_runner));
}
248 
// Asynchronously initializes the backend: creates the index, verifies (or
// creates) the on-disk structure off-thread, then runs |completion_callback|
// with the result.
void SimpleBackendImpl::Init(CompletionOnceCallback completion_callback) {
  // The index gets its own sequence: it may block on disk I/O and must be
  // allowed to finish writing at shutdown (BLOCK_SHUTDOWN).
  auto index_task_runner = base::ThreadPool::CreateSequencedTaskRunner(
      {base::MayBlock(), base::WithBaseSyncPrimitives(),
       base::TaskPriority::USER_BLOCKING,
       base::TaskShutdownBehavior::BLOCK_SHUTDOWN});

  prioritized_task_runner_ =
      base::MakeRefCounted<net::PrioritizedTaskRunner>(kWorkerPoolTaskTraits);

  index_ = std::make_unique<SimpleIndex>(
      base::SequencedTaskRunner::GetCurrentDefault(), cleanup_tracker_.get(),
      this, GetCacheType(),
      std::make_unique<SimpleIndexFile>(
          index_task_runner, file_operations_factory_, GetCacheType(), path_));
  // Record how long the index load takes once it completes.
  index_->ExecuteWhenReady(
      base::BindOnce(&RecordIndexLoad, GetCacheType(), base::TimeTicks::Now()));

  // Do the disk-structure check off-thread, then finish initialization (and
  // run |completion_callback|) back on this sequence via InitializeIndex.
  auto file_operations = file_operations_factory_->Create(index_task_runner);
  index_task_runner->PostTaskAndReplyWithResult(
      FROM_HERE,
      base::BindOnce(&SimpleBackendImpl::InitCacheStructureOnDisk,
                     std::move(file_operations), path_, orig_max_size_,
                     GetCacheType()),
      base::BindOnce(&SimpleBackendImpl::InitializeIndex, AsWeakPtr(),
                     std::move(completion_callback)));
}
275 
SetMaxSize(int64_t max_bytes)276 bool SimpleBackendImpl::SetMaxSize(int64_t max_bytes) {
277   if (max_bytes < 0)
278     return false;
279   orig_max_size_ = max_bytes;
280   index_->SetMaxSize(max_bytes);
281   return true;
282 }
283 
MaxFileSize() const284 int64_t SimpleBackendImpl::MaxFileSize() const {
285   uint64_t file_size_ratio = GetCacheType() == net::GENERATED_NATIVE_CODE_CACHE
286                                  ? kMaxNativeCodeFileRatio
287                                  : kMaxFileRatio;
288   return std::max(
289       base::saturated_cast<int64_t>(index_->max_size() / file_size_ratio),
290       kMinFileSizeLimit);
291 }
292 
// Marks |entry_hash| as having a doom in flight and hands back the waiter
// table so the caller can signal doom completion later.
scoped_refptr<SimplePostDoomWaiterTable> SimpleBackendImpl::OnDoomStart(
    uint64_t entry_hash) {
  post_doom_waiting_->OnDoomStart(entry_hash);
  return post_doom_waiting_;
}
298 
// Dooms a set of entry hashes. Hashes with live or doom-pending entry
// objects are doomed individually through those objects; the rest have their
// files deleted in one off-thread batch. |callback| runs once both the
// individual dooms and the mass delete have completed. Consumes (swaps out)
// the contents of |entry_hashes|.
void SimpleBackendImpl::DoomEntries(std::vector<uint64_t>* entry_hashes,
                                    net::CompletionOnceCallback callback) {
  auto mass_doom_entry_hashes = std::make_unique<std::vector<uint64_t>>();
  mass_doom_entry_hashes->swap(*entry_hashes);

  std::vector<uint64_t> to_doom_individually_hashes;

  // For each of the entry hashes, there are two cases:
  // 1. There are corresponding entries in active set, pending doom, or both
  //    sets, and so the hash should be doomed individually to avoid flakes.
  // 2. The hash is not in active use at all, so we can call
  //    SimpleSynchronousEntry::DeleteEntrySetFiles and delete the files en
  //    masse.
  // Iterating backwards lets us do swap-and-pop removal without disturbing
  // the not-yet-visited elements.
  for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
    const uint64_t entry_hash = (*mass_doom_entry_hashes)[i];
    if (!active_entries_.count(entry_hash) &&
        !post_doom_waiting_->Has(entry_hash)) {
      continue;
    }

    to_doom_individually_hashes.push_back(entry_hash);

    (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
    mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
  }

  // The +1 is for the mass-delete below, which also reports through the
  // barrier.
  base::RepeatingCallback<void(int)> barrier_callback =
      MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
                                    std::move(callback));
  for (std::vector<uint64_t>::const_iterator
           it = to_doom_individually_hashes.begin(),
           end = to_doom_individually_hashes.end();
       it != end; ++it) {
    const int doom_result = DoomEntryFromHash(*it, barrier_callback);
    DCHECK_EQ(net::ERR_IO_PENDING, doom_result);
    index_->Remove(*it);
  }

  for (std::vector<uint64_t>::const_iterator
           it = mass_doom_entry_hashes->begin(),
           end = mass_doom_entry_hashes->end();
       it != end; ++it) {
    index_->Remove(*it);
    OnDoomStart(*it);
  }

  // Taking this pointer here avoids undefined behaviour from calling
  // std::move() before mass_doom_entry_hashes.get().
  std::vector<uint64_t>* mass_doom_entry_hashes_ptr =
      mass_doom_entry_hashes.get();

  // We don't use priorities (i.e., `prioritized_task_runner_`) here because
  // we don't actually have them here (since this is for eviction based on
  // index).
  auto task_runner =
      base::ThreadPool::CreateSequencedTaskRunner(kWorkerPoolTaskTraits);
  task_runner->PostTaskAndReplyWithResult(
      FROM_HERE,
      base::BindOnce(&SimpleSynchronousEntry::DeleteEntrySetFiles,
                     mass_doom_entry_hashes_ptr, path_,
                     file_operations_factory_->CreateUnbound()),
      base::BindOnce(&SimpleBackendImpl::DoomEntriesComplete, AsWeakPtr(),
                     std::move(mass_doom_entry_hashes), barrier_callback));
}
363 
// Returns the number of entries known to the index.
int32_t SimpleBackendImpl::GetEntryCount() const {
  // TODO(pasko): Use directory file count when index is not ready.
  return index_->GetEntryCount();
}
368 
// Opens an existing entry for |key|. If the entry is mid-doom, either fails
// synchronously (optimistic mode with an empty post-doom queue) or queues a
// retry of this whole call to run once the doom completes.
EntryResult SimpleBackendImpl::OpenEntry(const std::string& key,
                                         net::RequestPriority request_priority,
                                         EntryResultCallback callback) {
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);

  std::vector<SimplePostDoomWaiter>* post_doom = nullptr;
  scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveOrDoomedEntry(
      entry_hash, key, request_priority, &post_doom);
  if (!simple_entry) {
    if (post_doom->empty() &&
        entry_operations_mode_ == SimpleEntryImpl::OPTIMISTIC_OPERATIONS) {
      // The entry is doomed, and no other backend operations are queued for the
      // entry, thus the open must fail and it's safe to return synchronously.
      net::NetLogWithSource log_for_entry(net::NetLogWithSource::Make(
          net_log_, net::NetLogSourceType::DISK_CACHE_ENTRY));
      log_for_entry.AddEvent(
          net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_CALL);
      log_for_entry.AddEventWithNetErrorCode(
          net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END, net::ERR_FAILED);
      return EntryResult::MakeError(net::ERR_FAILED);
    }

    // Queue a retry of this OpenEntry to run after the doom finishes.
    base::OnceCallback<EntryResult(EntryResultCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::OpenEntry, base::Unretained(this),
                       key, request_priority);
    post_doom->emplace_back(base::BindOnce(&RunEntryResultOperationAndCallback,
                                           AsWeakPtr(), std::move(operation),
                                           std::move(callback)));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }
  return simple_entry->OpenEntry(std::move(callback));
}
401 
// Creates an entry for |key|. If the hash is mid-doom, first tries an
// optimistic create serialized after the doom; failing that, queues a retry
// of this whole call to run once the doom completes.
EntryResult SimpleBackendImpl::CreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK_LT(0u, key.size());
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);

  std::vector<SimplePostDoomWaiter>* post_doom = nullptr;
  scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveOrDoomedEntry(
      entry_hash, key, request_priority, &post_doom);

  // If couldn't grab an entry object due to pending doom, see if circumstances
  // are right for an optimistic create.
  if (!simple_entry) {
    simple_entry = MaybeOptimisticCreateForPostDoom(
        entry_hash, key, request_priority, post_doom);
  }

  // If that doesn't work either, retry this once doom is done.
  if (!simple_entry) {
    base::OnceCallback<EntryResult(EntryResultCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::CreateEntry, base::Unretained(this),
                       key, request_priority);
    post_doom->emplace_back(base::BindOnce(&RunEntryResultOperationAndCallback,
                                           AsWeakPtr(), std::move(operation),
                                           std::move(callback)));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }

  return simple_entry->CreateEntry(std::move(callback));
}
433 
OpenOrCreateEntry(const std::string & key,net::RequestPriority request_priority,EntryResultCallback callback)434 EntryResult SimpleBackendImpl::OpenOrCreateEntry(
435     const std::string& key,
436     net::RequestPriority request_priority,
437     EntryResultCallback callback) {
438   DCHECK_LT(0u, key.size());
439   const uint64_t entry_hash = simple_util::GetEntryHashKey(key);
440 
441   std::vector<SimplePostDoomWaiter>* post_doom = nullptr;
442   scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveOrDoomedEntry(
443       entry_hash, key, request_priority, &post_doom);
444 
445   // If couldn't grab an entry object due to pending doom, see if circumstances
446   // are right for an optimistic create.
447   if (!simple_entry) {
448     simple_entry = MaybeOptimisticCreateForPostDoom(
449         entry_hash, key, request_priority, post_doom);
450     if (simple_entry) {
451       return simple_entry->CreateEntry(std::move(callback));
452     } else {
453       // If that doesn't work either, retry this once doom is done.
454       base::OnceCallback<EntryResult(EntryResultCallback)> operation =
455           base::BindOnce(&SimpleBackendImpl::OpenOrCreateEntry,
456                          base::Unretained(this), key, request_priority);
457       post_doom->emplace_back(
458           base::BindOnce(&RunEntryResultOperationAndCallback, AsWeakPtr(),
459                          std::move(operation), std::move(callback)));
460       return EntryResult::MakeError(net::ERR_IO_PENDING);
461     }
462   }
463 
464   return simple_entry->OpenOrCreateEntry(std::move(callback));
465 }
466 
// Attempts an optimistic create for an entry whose hash is pending doom.
// Returns the new entry on success, or null when the post-doom queue is
// non-empty or the backend is not in optimistic mode.
scoped_refptr<SimpleEntryImpl>
SimpleBackendImpl::MaybeOptimisticCreateForPostDoom(
    uint64_t entry_hash,
    const std::string& key,
    net::RequestPriority request_priority,
    std::vector<SimplePostDoomWaiter>* post_doom) {
  scoped_refptr<SimpleEntryImpl> simple_entry;
  // We would like to optimistically have create go ahead, for benefit of
  // HTTP cache use. This can only be sanely done if we are the only op
  // serialized after doom's completion.
  if (post_doom->empty() &&
      entry_operations_mode_ == SimpleEntryImpl::OPTIMISTIC_OPERATIONS) {
    simple_entry = base::MakeRefCounted<SimpleEntryImpl>(
        GetCacheType(), path_, cleanup_tracker_.get(), entry_hash,
        entry_operations_mode_, this, file_tracker_, file_operations_factory_,
        net_log_, GetNewEntryPriority(request_priority));
    simple_entry->SetKey(key);
    // Register in the active set now; the proxy's destructor unregisters.
    simple_entry->SetActiveEntryProxy(
        ActiveEntryProxy::Create(entry_hash, this));
    // The entry must hold its create until the in-flight doom completes.
    simple_entry->SetCreatePendingDoom();
    std::pair<EntryMap::iterator, bool> insert_result = active_entries_.insert(
        EntryMap::value_type(entry_hash, simple_entry.get()));
    post_doom->emplace_back(base::BindOnce(
        &SimpleEntryImpl::NotifyDoomBeforeCreateComplete, simple_entry));
    DCHECK(insert_result.second);
  }

  return simple_entry;
}
496 
// Dooms the entry for |key|. If the hash is already mid-doom, queues a retry
// of this call to run after that doom completes.
net::Error SimpleBackendImpl::DoomEntry(const std::string& key,
                                        net::RequestPriority priority,
                                        CompletionOnceCallback callback) {
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);

  std::vector<SimplePostDoomWaiter>* post_doom = nullptr;
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveOrDoomedEntry(entry_hash, key, priority, &post_doom);
  if (!simple_entry) {
    // At first glance, it appears exceedingly silly to queue up a doom
    // when we get here because the files corresponding to our key are being
    // deleted... but it's possible that one of the things in post_doom is a
    // create for our key, in which case we still have work to do.
    base::OnceCallback<net::Error(CompletionOnceCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::DoomEntry, base::Unretained(this),
                       key, priority);
    post_doom->emplace_back(base::BindOnce(&RunOperationAndCallback,
                                           AsWeakPtr(), std::move(operation),
                                           std::move(callback)));
    return net::ERR_IO_PENDING;
  }

  return simple_entry->DoomEntry(std::move(callback));
}
521 
// Dooms everything: an unbounded time range covers all entries.
net::Error SimpleBackendImpl::DoomAllEntries(CompletionOnceCallback callback) {
  return DoomEntriesBetween(Time(), Time(), std::move(callback));
}
525 
// Dooms entries last used in [initial_time, end_time). Defers the actual
// work until the index has finished loading; always reports asynchronously.
net::Error SimpleBackendImpl::DoomEntriesBetween(
    const Time initial_time,
    const Time end_time,
    CompletionOnceCallback callback) {
  index_->ExecuteWhenReady(base::BindOnce(&SimpleBackendImpl::IndexReadyForDoom,
                                          AsWeakPtr(), initial_time, end_time,
                                          std::move(callback)));
  return net::ERR_IO_PENDING;
}
535 
// Dooms entries used since |initial_time|; a null end time means "now".
net::Error SimpleBackendImpl::DoomEntriesSince(
    const Time initial_time,
    CompletionOnceCallback callback) {
  return DoomEntriesBetween(initial_time, Time(), std::move(callback));
}
541 
// Reports the total cache size once the index is loaded; always async.
int64_t SimpleBackendImpl::CalculateSizeOfAllEntries(
    Int64CompletionOnceCallback callback) {
  index_->ExecuteWhenReady(
      base::BindOnce(&SimpleBackendImpl::IndexReadyForSizeCalculation,
                     AsWeakPtr(), std::move(callback)));
  return net::ERR_IO_PENDING;
}
549 
// Reports the size of entries in the given time range once the index is
// loaded; always async.
int64_t SimpleBackendImpl::CalculateSizeOfEntriesBetween(
    base::Time initial_time,
    base::Time end_time,
    Int64CompletionOnceCallback callback) {
  index_->ExecuteWhenReady(
      base::BindOnce(&SimpleBackendImpl::IndexReadyForSizeBetweenCalculation,
                     AsWeakPtr(), initial_time, end_time, std::move(callback)));
  return net::ERR_IO_PENDING;
}
559 
// Iterates over entries known to the index, opening each in turn. Snapshots
// the hash set on first use, skips hashes the index has since dropped, and
// transparently continues past entries that fail to open.
class SimpleBackendImpl::SimpleIterator final : public Iterator {
 public:
  explicit SimpleIterator(base::WeakPtr<SimpleBackendImpl> backend)
      : backend_(backend) {}

  // From Backend::Iterator:
  EntryResult OpenNextEntry(EntryResultCallback callback) override {
    if (!backend_)
      return EntryResult::MakeError(net::ERR_FAILED);
    // Defer the actual work until the index has finished loading.
    CompletionOnceCallback open_next_entry_impl =
        base::BindOnce(&SimpleIterator::OpenNextEntryImpl,
                       weak_factory_.GetWeakPtr(), std::move(callback));
    backend_->index_->ExecuteWhenReady(std::move(open_next_entry_impl));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }

  // Runs once the index is ready; walks hashes until one opens (or starts
  // opening) successfully, or the enumeration is exhausted.
  void OpenNextEntryImpl(EntryResultCallback callback,
                         int index_initialization_error_code) {
    if (!backend_) {
      std::move(callback).Run(EntryResult::MakeError(net::ERR_FAILED));
      return;
    }
    if (index_initialization_error_code != net::OK) {
      std::move(callback).Run(EntryResult::MakeError(
          static_cast<net::Error>(index_initialization_error_code)));
      return;
    }
    // Snapshot the full hash set once, on first use.
    if (!hashes_to_enumerate_)
      hashes_to_enumerate_ = backend_->index()->GetAllHashes();

    while (!hashes_to_enumerate_->empty()) {
      uint64_t entry_hash = hashes_to_enumerate_->back();
      hashes_to_enumerate_->pop_back();
      // Only attempt hashes the index still knows about.
      if (backend_->index()->Has(entry_hash)) {
        // Split |callback|: one half rides the async continuation, the
        // other stays here for a synchronous result.
        auto split_callback = base::SplitOnceCallback(std::move(callback));
        callback = std::move(split_callback.first);
        EntryResultCallback continue_iteration = base::BindOnce(
            &SimpleIterator::CheckIterationReturnValue,
            weak_factory_.GetWeakPtr(), std::move(split_callback.second));
        EntryResult open_result = backend_->OpenEntryFromHash(
            entry_hash, std::move(continue_iteration));
        if (open_result.net_error() == net::ERR_IO_PENDING)
          return;
        if (open_result.net_error() != net::ERR_FAILED) {
          std::move(callback).Run(std::move(open_result));
          return;
        }
        // ERR_FAILED: skip this hash and keep scanning.
      }
    }
    // Enumeration exhausted.
    std::move(callback).Run(EntryResult::MakeError(net::ERR_FAILED));
  }

  // Async continuation: a failed open means "advance to the next hash".
  void CheckIterationReturnValue(EntryResultCallback callback,
                                 EntryResult result) {
    if (result.net_error() == net::ERR_FAILED) {
      OpenNextEntry(std::move(callback));
      return;
    }
    std::move(callback).Run(std::move(result));
  }

 private:
  base::WeakPtr<SimpleBackendImpl> backend_;
  // Remaining hashes to visit; null until the first OpenNextEntryImpl call.
  std::unique_ptr<std::vector<uint64_t>> hashes_to_enumerate_;
  base::WeakPtrFactory<SimpleIterator> weak_factory_{this};
};
626 
// Hands out an iterator bound to this backend via a weak pointer, so the
// iterator degrades gracefully if the backend is destroyed first.
std::unique_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() {
  return std::make_unique<SimpleIterator>(AsWeakPtr());
}
630 
GetStats(base::StringPairs * stats)631 void SimpleBackendImpl::GetStats(base::StringPairs* stats) {
632   std::pair<std::string, std::string> item;
633   item.first = "Cache type";
634   item.second = "Simple Cache";
635   stats->push_back(item);
636 }
637 
// Bumps the entry's last-used time in the index when an external consumer
// reports a hit for |key| (no entry object is opened).
void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
  index_->UseIfExists(simple_util::GetEntryHashKey(key));
}
641 
// Returns the in-memory hint byte the index keeps for |key|'s entry.
uint8_t SimpleBackendImpl::GetEntryInMemoryData(const std::string& key) {
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);
  return index_->GetEntryInMemoryData(entry_hash);
}
646 
// Stores an in-memory hint byte in the index for |key|'s entry.
void SimpleBackendImpl::SetEntryInMemoryData(const std::string& key,
                                             uint8_t data) {
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);
  index_->SetEntryInMemoryData(entry_hash, data);
}
652 
// Reply half of Init(): on disk-structure success, configures and starts the
// index; in all cases forwards the net error to |callback|.
void SimpleBackendImpl::InitializeIndex(CompletionOnceCallback callback,
                                        const DiskStatResult& result) {
  if (result.net_error == net::OK) {
    index_->SetMaxSize(result.max_size);
#if BUILDFLAG(IS_ANDROID)
    // Let the index react to app foreground/background transitions.
    if (app_status_listener_)
      index_->set_app_status_listener(app_status_listener_);
#endif
    index_->Initialize(result.cache_dir_mtime);
  }
  std::move(callback).Run(result.net_error);
}
665 
// Continuation of DoomEntriesBetween once the index is loaded: collects the
// hashes in range and dooms them; forwards index-load failures directly.
void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
                                          Time end_time,
                                          CompletionOnceCallback callback,
                                          int result) {
  if (result != net::OK) {
    std::move(callback).Run(result);
    return;
  }
  // NOTE(review): the release()-and-rewrap suggests GetEntriesBetween returns
  // a differently-typed unique_ptr; confirm before simplifying to std::move.
  std::unique_ptr<std::vector<uint64_t>> removed_key_hashes(
      index_->GetEntriesBetween(initial_time, end_time).release());
  DoomEntries(removed_key_hashes.get(), std::move(callback));
}
678 
IndexReadyForSizeCalculation(Int64CompletionOnceCallback callback,int result)679 void SimpleBackendImpl::IndexReadyForSizeCalculation(
680     Int64CompletionOnceCallback callback,
681     int result) {
682   int64_t rv = result == net::OK ? index_->GetCacheSize() : result;
683   std::move(callback).Run(rv);
684 }
685 
IndexReadyForSizeBetweenCalculation(base::Time initial_time,base::Time end_time,Int64CompletionOnceCallback callback,int result)686 void SimpleBackendImpl::IndexReadyForSizeBetweenCalculation(
687     base::Time initial_time,
688     base::Time end_time,
689     Int64CompletionOnceCallback callback,
690     int result) {
691   int64_t rv = result == net::OK
692                    ? index_->GetCacheSizeBetween(initial_time, end_time)
693                    : result;
694   std::move(callback).Run(rv);
695 }
696 
// static
//
// Runs on a worker (no backend state is touched): verifies the on-disk file
// structure, making one recovery attempt if inconsistent, then collects the
// cache directory mtime and decides the maximum cache size. Results are
// returned in a DiskStatResult whose |net_error| is net::OK on success and
// net::ERR_FAILED otherwise.
SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
    std::unique_ptr<BackendFileOperations> file_operations,
    const base::FilePath& path,
    uint64_t suggested_max_size,
    net::CacheType cache_type) {
  DiskStatResult result;
  result.max_size = suggested_max_size;
  result.net_error = net::OK;
  SimpleCacheConsistencyResult consistency =
      FileStructureConsistent(file_operations.get(), path);
  SIMPLE_CACHE_UMA(ENUMERATION, "ConsistencyResult", cache_type, consistency);

  // If the cache structure is inconsistent make a single attempt at
  // recovering it.  Previously there were bugs that could cause a partially
  // written fake index file to be left in an otherwise empty cache.  In
  // that case we can delete the index files and start over.  Also, some
  // consistency failures may leave an empty directory directly and we can
  // retry those cases as well.
  if (consistency != SimpleCacheConsistencyResult::kOK) {
    bool deleted_files = disk_cache::DeleteIndexFilesIfCacheIsEmpty(path);
    SIMPLE_CACHE_UMA(BOOLEAN, "DidDeleteIndexFilesAfterFailedConsistency",
                     cache_type, deleted_files);
    // Only retry the consistency check when the directory ended up empty --
    // recovery is never attempted over surviving cache contents.
    if (base::IsDirectoryEmpty(path)) {
      SimpleCacheConsistencyResult orig_consistency = consistency;
      consistency = FileStructureConsistent(file_operations.get(), path);
      SIMPLE_CACHE_UMA(ENUMERATION, "RetryConsistencyResult", cache_type,
                       consistency);
      if (consistency == SimpleCacheConsistencyResult::kOK) {
        SIMPLE_CACHE_UMA(ENUMERATION,
                         "OriginalConsistencyResultBeforeSuccessfulRetry",
                         cache_type, orig_consistency);
      }
    }
    if (deleted_files) {
      SIMPLE_CACHE_UMA(ENUMERATION, "ConsistencyResultAfterIndexFilesDeleted",
                       cache_type, consistency);
    }
  }

  if (consistency != SimpleCacheConsistencyResult::kOK) {
    LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
               << static_cast<int>(consistency)
               << " path: " << path.LossyDisplayName();
    result.net_error = net::ERR_FAILED;
  } else {
    absl::optional<base::File::Info> file_info =
        file_operations->GetFileInfo(path);
    if (!file_info.has_value()) {
      // Something deleted the directory between when we set it up and the
      // mstat; this is not uncommon on some test fixtures which erase their
      // tempdir while some worker threads may still be running.
      LOG(ERROR) << "Simple Cache Backend: cache directory inaccessible right "
                    "after creation; path: "
                 << path.LossyDisplayName();
      result.net_error = net::ERR_FAILED;
    } else {
      result.cache_dir_mtime = file_info->last_modified;
      if (!result.max_size) {
        // No caller-suggested size: derive one from the free disk space.
        int64_t available = base::SysInfo::AmountOfFreeDiskSpace(path);
        result.max_size = disk_cache::PreferredCacheSize(available, cache_type);
        DCHECK(result.max_size);
      }
    }
  }
  return result;
}
764 
// Returns the active entry for |entry_hash|/|key|, creating and registering a
// new one if none exists. If a doom is pending for this hash, returns nullptr
// and sets |*post_doom| to the waiter list so the caller can queue its
// operation behind the doom. On the (unlikely) event of a hash collision with
// a different active key, the colliding entry is doomed and the lookup is
// retried, which then takes the pending-doom path.
scoped_refptr<SimpleEntryImpl>
SimpleBackendImpl::CreateOrFindActiveOrDoomedEntry(
    const uint64_t entry_hash,
    const std::string& key,
    net::RequestPriority request_priority,
    std::vector<SimplePostDoomWaiter>** post_doom) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));

  // If there is a doom pending, we would want to serialize after it.
  *post_doom = post_doom_waiting_->Find(entry_hash);
  if (*post_doom)
    return nullptr;

  // insert() either creates a fresh (hash -> nullptr) slot or finds the
  // existing active entry for this hash.
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(entry_hash, nullptr));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    // Newly inserted slot: construct the entry and hook it up to the
    // active-entry bookkeeping via its proxy.
    SimpleEntryImpl* entry = it->second = new SimpleEntryImpl(
        GetCacheType(), path_, cleanup_tracker_.get(), entry_hash,
        entry_operations_mode_, this, file_tracker_, file_operations_factory_,
        net_log_, GetNewEntryPriority(request_priority));
    entry->SetKey(key);
    entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
  }
  // TODO(jkarlin): In case of recycling a half-closed entry, we might want to
  // update its priority.
  DCHECK(it->second);
  // It's possible, but unlikely, that we have an entry hash collision with a
  // currently active entry.
  if (key != it->second->key()) {
    it->second->Doom();
    // The doom must have removed the entry from the active set and
    // registered a pending doom for this hash.
    DCHECK_EQ(0U, active_entries_.count(entry_hash));
    DCHECK(post_doom_waiting_->Has(entry_hash));
    // Re-run ourselves to handle the now-pending doom.
    return CreateOrFindActiveOrDoomedEntry(entry_hash, key, request_priority,
                                           post_doom);
  }
  return base::WrapRefCounted(it->second);
}
805 
// Opens an entry identified only by |entry_hash| (no key available). If a
// doom is pending on the hash, the open is queued to re-run after the doom;
// if an active entry already exists, a normal keyed OpenEntry() is issued;
// otherwise a speculative SimpleEntryImpl is opened and handed to
// OnEntryOpenedFromHash() to be registered as active on success.
EntryResult SimpleBackendImpl::OpenEntryFromHash(uint64_t entry_hash,
                                                 EntryResultCallback callback) {
  std::vector<SimplePostDoomWaiter>* post_doom =
      post_doom_waiting_->Find(entry_hash);
  if (post_doom) {
    base::OnceCallback<EntryResult(EntryResultCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::OpenEntryFromHash,
                       base::Unretained(this), entry_hash);
    // TODO(https://crbug.com/1019682) The cancellation behavior looks wrong.
    post_doom->emplace_back(base::BindOnce(&RunEntryResultOperationAndCallback,
                                           AsWeakPtr(), std::move(operation),
                                           std::move(callback)));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }

  // An active entry knows its key, so the keyed open path can be reused.
  auto has_active = active_entries_.find(entry_hash);
  if (has_active != active_entries_.end()) {
    return OpenEntry(has_active->second->key(), net::HIGHEST,
                     std::move(callback));
  }

  // Not active: open a fresh entry by hash. It is not added to
  // |active_entries_| here; OnEntryOpenedFromHash() does that once the open
  // succeeds.
  auto simple_entry = base::MakeRefCounted<SimpleEntryImpl>(
      GetCacheType(), path_, cleanup_tracker_.get(), entry_hash,
      entry_operations_mode_, this, file_tracker_, file_operations_factory_,
      net_log_, GetNewEntryPriority(net::HIGHEST));
  EntryResultCallback backend_callback =
      base::BindOnce(&SimpleBackendImpl::OnEntryOpenedFromHash, AsWeakPtr(),
                     entry_hash, simple_entry, std::move(callback));
  return simple_entry->OpenEntry(std::move(backend_callback));
}
836 
// Dooms the entry identified by |entry_hash|. Three cases: a doom already
// pending on the hash (queue this one behind it), an active entry (doom
// through the entry itself), or neither (doom directly via DoomEntries()).
// Always completes asynchronously with net::ERR_IO_PENDING.
net::Error SimpleBackendImpl::DoomEntryFromHash(
    uint64_t entry_hash,
    CompletionOnceCallback callback) {
  std::vector<SimplePostDoomWaiter>* post_doom =
      post_doom_waiting_->Find(entry_hash);
  if (post_doom) {
    // Serialize after the in-flight doom by re-running this method when it
    // completes.
    base::OnceCallback<net::Error(CompletionOnceCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::DoomEntryFromHash,
                       base::Unretained(this), entry_hash);
    post_doom->emplace_back(base::BindOnce(&RunOperationAndCallback,
                                           AsWeakPtr(), std::move(operation),
                                           std::move(callback)));
    return net::ERR_IO_PENDING;
  }

  auto active_it = active_entries_.find(entry_hash);
  if (active_it != active_entries_.end())
    return active_it->second->DoomEntry(std::move(callback));

  // There's no pending dooms, nor any open entry. We can make a trivial
  // call to DoomEntries() to delete this entry.
  std::vector<uint64_t> entry_hash_vector;
  entry_hash_vector.push_back(entry_hash);
  DoomEntries(&entry_hash_vector, std::move(callback));
  return net::ERR_IO_PENDING;
}
863 
// Completion handler for OpenEntryFromHash(). On a successful open, attempts
// to register |simple_entry| as the active entry for |hash|; if another entry
// became active for the hash in the meantime, the speculative entry is closed
// and the already-active entry is re-opened for the caller instead.
void SimpleBackendImpl::OnEntryOpenedFromHash(
    uint64_t hash,
    const scoped_refptr<SimpleEntryImpl>& simple_entry,
    EntryResultCallback callback,
    EntryResult result) {
  if (result.net_error() != net::OK) {
    // Open failed; just propagate the error.
    std::move(callback).Run(std::move(result));
    return;
  }

  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(hash, simple_entry.get()));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    // There was no active entry corresponding to this hash. We've already put
    // the entry opened from hash in the |active_entries_|. We now provide the
    // proxy object to the entry.
    it->second->SetActiveEntryProxy(ActiveEntryProxy::Create(hash, this));
    std::move(callback).Run(std::move(result));
  } else {
    // The entry was made active while we waiting for the open from hash to
    // finish. The entry created from hash needs to be closed, and the one
    // in |active_entries_| can be returned to the caller.
    Entry* entry_from_result = result.ReleaseEntry();
    DCHECK_EQ(entry_from_result, simple_entry.get());
    simple_entry->Close();
    EntryResult reopen_result = it->second->OpenEntry(std::move(callback));
    DCHECK_EQ(reopen_result.net_error(), net::ERR_IO_PENDING);
  }
}
895 
DoomEntriesComplete(std::unique_ptr<std::vector<uint64_t>> entry_hashes,CompletionOnceCallback callback,int result)896 void SimpleBackendImpl::DoomEntriesComplete(
897     std::unique_ptr<std::vector<uint64_t>> entry_hashes,
898     CompletionOnceCallback callback,
899     int result) {
900   for (const uint64_t& entry_hash : *entry_hashes)
901     post_doom_waiting_->OnDoomComplete(entry_hash);
902   std::move(callback).Run(result);
903 }
904 
GetNewEntryPriority(net::RequestPriority request_priority)905 uint32_t SimpleBackendImpl::GetNewEntryPriority(
906     net::RequestPriority request_priority) {
907   // Lower priority is better, so give high network priority the least bump.
908   return ((net::RequestPriority::MAXIMUM_PRIORITY - request_priority) * 10000) +
909          entry_count_++;
910 }
911 
912 }  // namespace disk_cache
913