// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Internal helper used to sequence cleanup and reuse of cache directories
// among different objects.
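//
// A minimal usage sketch (caller-side names such as RetryCreateBackend are
// illustrative, not part of this file):
//
//   scoped_refptr<BackendCleanupTracker> tracker =
//       BackendCleanupTracker::TryCreate(
//           cache_path, base::BindOnce(&RetryCreateBackend));
//   if (!tracker) {
//     // Some other backend still owns |cache_path|; RetryCreateBackend will
//     // be posted to this sequence once that backend's tracker is destroyed.
//     return;
//   }
//   // Keep |tracker| alive for the backend's lifetime. Destroying the last
//   // reference marks |cache_path| as free for reuse.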

#include "net/disk_cache/backend_cleanup_tracker.h"

#include <unordered_map>
#include <utility>

13 #include "base/files/file_path.h"
14 #include "base/functional/callback.h"
15 #include "base/lazy_instance.h"
16 #include "base/memory/raw_ptr.h"
17 #include "base/memory/ref_counted.h"
18 #include "base/synchronization/lock.h"
19 #include "base/task/sequenced_task_runner.h"
20
21 namespace disk_cache {
22
23 namespace {
24
25 using TrackerMap =
26 std::unordered_map<base::FilePath,
27 raw_ptr<BackendCleanupTracker, CtnExperimental>>;
28 struct AllBackendCleanupTrackers {
29 TrackerMap map;
30
  // Since clients can potentially call CreateCacheBackend from multiple
  // threads, we need to lock the map keeping track of cleanup trackers
  // for these backends. Our overall strategy is to have TryCreate
  // act as an arbitrator --- whatever thread grabs one gets to operate
  // on the tracker freely until it gets destroyed.
  base::Lock lock;
};

static base::LazyInstance<AllBackendCleanupTrackers>::Leaky g_all_trackers;

}  // namespace

// static
scoped_refptr<BackendCleanupTracker> BackendCleanupTracker::TryCreate(
    const base::FilePath& path,
    base::OnceClosure retry_closure) {
  AllBackendCleanupTrackers* all_trackers = g_all_trackers.Pointer();
  base::AutoLock lock(all_trackers->lock);

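  // Attempt to claim |path| by inserting a placeholder entry. Exactly one
  // caller sees insert_result.second == true and becomes the tracker's owner;
  // everyone else queues a retry callback on the existing entry.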
  std::pair<TrackerMap::iterator, bool> insert_result =
      all_trackers->map.insert(
          std::pair<base::FilePath, BackendCleanupTracker*>(path, nullptr));
  if (insert_result.second) {
    auto tracker = base::WrapRefCounted(new BackendCleanupTracker(path));
    insert_result.first->second = tracker.get();
    return tracker;
  } else {
    insert_result.first->second->AddPostCleanupCallbackImpl(
        std::move(retry_closure));
    return nullptr;
  }
}

void BackendCleanupTracker::AddPostCleanupCallback(base::OnceClosure cb) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(seq_checker_);
  // Despite the sequencing requirement, we need to grab the table lock since
  // this may otherwise race against TryCreate.
  base::AutoLock lock(g_all_trackers.Get().lock);
  AddPostCleanupCallbackImpl(std::move(cb));
}

void BackendCleanupTracker::AddPostCleanupCallbackImpl(base::OnceClosure cb) {
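  // Pair the callback with the caller's task runner so that, once cleanup
  // finishes, it can be posted back to the sequence that registered it.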
  post_cleanup_cbs_.emplace_back(base::SequencedTaskRunner::GetCurrentDefault(),
                                 std::move(cb));
}

BackendCleanupTracker::BackendCleanupTracker(const base::FilePath& path)
    : path_(path) {}

BackendCleanupTracker::~BackendCleanupTracker() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(seq_checker_);

  {
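    // Unregister from the global table; once the lock is released, a new
    // backend may claim |path_| via TryCreate.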
    AllBackendCleanupTrackers* all_trackers = g_all_trackers.Pointer();
    base::AutoLock lock(all_trackers->lock);
    int rv = all_trackers->map.erase(path_);
    DCHECK_EQ(1, rv);
  }

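  // Notify every waiter, each on the sequence it registered its callback from.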
  while (!post_cleanup_cbs_.empty()) {
    post_cleanup_cbs_.back().first->PostTask(
        FROM_HERE, std::move(post_cleanup_cbs_.back().second));
    post_cleanup_cbs_.pop_back();
  }
}

}  // namespace disk_cache