// NOTE: code-viewer navigation chrome ("Home", "Line#", "Scopes#", "Navigate",
// "Raw", "Download") removed; it was not part of the original source file.
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/memory/mem_backend_impl.h"

#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <utility>

#include "base/functional/bind.h"
#include "base/logging.h"
#include "base/system/sys_info.h"
#include "base/task/sequenced_task_runner.h"
#include "base/time/clock.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/memory/mem_entry_impl.h"

using base::Time;

namespace disk_cache {

namespace {

const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
const int kDefaultEvictionSize = kDefaultInMemoryCacheSize / 10;
30 // Returns the next entry after |node| in |lru_list| that's not a child
31 // of |node|.  This is useful when dooming, since dooming a parent entry
32 // will also doom its children.
NextSkippingChildren(const base::LinkedList<MemEntryImpl> & lru_list,base::LinkNode<MemEntryImpl> * node)33 base::LinkNode<MemEntryImpl>* NextSkippingChildren(
34     const base::LinkedList<MemEntryImpl>& lru_list,
35     base::LinkNode<MemEntryImpl>* node) {
36   MemEntryImpl* cur = node->value();
37   do {
38     node = node->next();
39   } while (node != lru_list.end() && node->value()->parent() == cur);
40   return node;
41 }
42 
}  // namespace

MemBackendImpl(net::NetLog * net_log)45 MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
46     : Backend(net::MEMORY_CACHE),
47       net_log_(net_log),
48       memory_pressure_listener_(
49           FROM_HERE,
50           base::BindRepeating(&MemBackendImpl::OnMemoryPressure,
51                               base::Unretained(this))) {}
52 
~MemBackendImpl()53 MemBackendImpl::~MemBackendImpl() {
54   while (!entries_.empty())
55     entries_.begin()->second->Doom();
56 
57   if (!post_cleanup_callback_.is_null())
58     base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
59         FROM_HERE, std::move(post_cleanup_callback_));
60 }
61 
62 // static
CreateBackend(int64_t max_bytes,net::NetLog * net_log)63 std::unique_ptr<MemBackendImpl> MemBackendImpl::CreateBackend(
64     int64_t max_bytes,
65     net::NetLog* net_log) {
66   std::unique_ptr<MemBackendImpl> cache(
67       std::make_unique<MemBackendImpl>(net_log));
68   if (cache->SetMaxSize(max_bytes) && cache->Init())
69     return cache;
70 
71   LOG(ERROR) << "Unable to create cache";
72   return nullptr;
73 }
74 
Init()75 bool MemBackendImpl::Init() {
76   if (max_size_)
77     return true;
78 
79   uint64_t total_memory = base::SysInfo::AmountOfPhysicalMemory();
80 
81   if (total_memory == 0) {
82     max_size_ = kDefaultInMemoryCacheSize;
83     return true;
84   }
85 
86   // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
87   // reached on system with more than 2.5 GB of RAM.
88   total_memory = total_memory * 2 / 100;
89   if (total_memory > static_cast<uint64_t>(kDefaultInMemoryCacheSize) * 5)
90     max_size_ = kDefaultInMemoryCacheSize * 5;
91   else
92     max_size_ = static_cast<int32_t>(total_memory);
93 
94   return true;
95 }
96 
SetMaxSize(int64_t max_bytes)97 bool MemBackendImpl::SetMaxSize(int64_t max_bytes) {
98   if (max_bytes < 0 || max_bytes > std::numeric_limits<int>::max())
99     return false;
100 
101   // Zero size means use the default.
102   if (!max_bytes)
103     return true;
104 
105   max_size_ = max_bytes;
106   return true;
107 }
108 
MaxFileSize() const109 int64_t MemBackendImpl::MaxFileSize() const {
110   return max_size_ / 8;
111 }
112 
OnEntryInserted(MemEntryImpl * entry)113 void MemBackendImpl::OnEntryInserted(MemEntryImpl* entry) {
114   lru_list_.Append(entry);
115 }
116 
OnEntryUpdated(MemEntryImpl * entry)117 void MemBackendImpl::OnEntryUpdated(MemEntryImpl* entry) {
118   // LinkedList<>::RemoveFromList() removes |entry| from |lru_list_|.
119   entry->RemoveFromList();
120   lru_list_.Append(entry);
121 }
122 
OnEntryDoomed(MemEntryImpl * entry)123 void MemBackendImpl::OnEntryDoomed(MemEntryImpl* entry) {
124   if (entry->type() == MemEntryImpl::EntryType::kParent)
125     entries_.erase(entry->key());
126   // LinkedList<>::RemoveFromList() removes |entry| from |lru_list_|.
127   entry->RemoveFromList();
128 }
129 
ModifyStorageSize(int32_t delta)130 void MemBackendImpl::ModifyStorageSize(int32_t delta) {
131   current_size_ += delta;
132   if (delta > 0)
133     EvictIfNeeded();
134 }
135 
HasExceededStorageSize() const136 bool MemBackendImpl::HasExceededStorageSize() const {
137   return current_size_ > max_size_;
138 }
139 
SetPostCleanupCallback(base::OnceClosure cb)140 void MemBackendImpl::SetPostCleanupCallback(base::OnceClosure cb) {
141   DCHECK(post_cleanup_callback_.is_null());
142   post_cleanup_callback_ = std::move(cb);
143 }
144 
145 // static
Now(const base::WeakPtr<MemBackendImpl> & self)146 base::Time MemBackendImpl::Now(const base::WeakPtr<MemBackendImpl>& self) {
147   MemBackendImpl* instance = self.get();
148   if (instance && instance->custom_clock_for_testing_)
149     return instance->custom_clock_for_testing_->Now();
150   return Time::Now();
151 }
152 
SetClockForTesting(base::Clock * clock)153 void MemBackendImpl::SetClockForTesting(base::Clock* clock) {
154   custom_clock_for_testing_ = clock;
155 }
156 
GetEntryCount() const157 int32_t MemBackendImpl::GetEntryCount() const {
158   return static_cast<int32_t>(entries_.size());
159 }
160 
OpenOrCreateEntry(const std::string & key,net::RequestPriority priority,EntryResultCallback callback)161 EntryResult MemBackendImpl::OpenOrCreateEntry(const std::string& key,
162                                               net::RequestPriority priority,
163                                               EntryResultCallback callback) {
164   EntryResult result = OpenEntry(key, priority, EntryResultCallback());
165   if (result.net_error() == net::OK)
166     return result;
167 
168   // Key was not opened, try creating it instead.
169   return CreateEntry(key, priority, EntryResultCallback());
170 }
171 
OpenEntry(const std::string & key,net::RequestPriority request_priority,EntryResultCallback callback)172 EntryResult MemBackendImpl::OpenEntry(const std::string& key,
173                                       net::RequestPriority request_priority,
174                                       EntryResultCallback callback) {
175   auto it = entries_.find(key);
176   if (it == entries_.end())
177     return EntryResult::MakeError(net::ERR_FAILED);
178 
179   it->second->Open();
180 
181   return EntryResult::MakeOpened(it->second);
182 }
183 
CreateEntry(const std::string & key,net::RequestPriority request_priority,EntryResultCallback callback)184 EntryResult MemBackendImpl::CreateEntry(const std::string& key,
185                                         net::RequestPriority request_priority,
186                                         EntryResultCallback callback) {
187   std::pair<EntryMap::iterator, bool> create_result =
188       entries_.insert(EntryMap::value_type(key, nullptr));
189   const bool did_insert = create_result.second;
190   if (!did_insert)
191     return EntryResult::MakeError(net::ERR_FAILED);
192 
193   MemEntryImpl* cache_entry =
194       new MemEntryImpl(weak_factory_.GetWeakPtr(), key, net_log_);
195   create_result.first->second = cache_entry;
196   return EntryResult::MakeCreated(cache_entry);
197 }
198 
DoomEntry(const std::string & key,net::RequestPriority priority,CompletionOnceCallback callback)199 net::Error MemBackendImpl::DoomEntry(const std::string& key,
200                                      net::RequestPriority priority,
201                                      CompletionOnceCallback callback) {
202   auto it = entries_.find(key);
203   if (it == entries_.end())
204     return net::ERR_FAILED;
205 
206   it->second->Doom();
207   return net::OK;
208 }
209 
DoomAllEntries(CompletionOnceCallback callback)210 net::Error MemBackendImpl::DoomAllEntries(CompletionOnceCallback callback) {
211   return DoomEntriesBetween(Time(), Time(), std::move(callback));
212 }
213 
DoomEntriesBetween(Time initial_time,Time end_time,CompletionOnceCallback callback)214 net::Error MemBackendImpl::DoomEntriesBetween(Time initial_time,
215                                               Time end_time,
216                                               CompletionOnceCallback callback) {
217   if (end_time.is_null())
218     end_time = Time::Max();
219   DCHECK_GE(end_time, initial_time);
220 
221   base::LinkNode<MemEntryImpl>* node = lru_list_.head();
222   while (node != lru_list_.end()) {
223     MemEntryImpl* candidate = node->value();
224     node = NextSkippingChildren(lru_list_, node);
225 
226     if (candidate->GetLastUsed() >= initial_time &&
227         candidate->GetLastUsed() < end_time) {
228       candidate->Doom();
229     }
230   }
231 
232   return net::OK;
233 }
234 
DoomEntriesSince(Time initial_time,CompletionOnceCallback callback)235 net::Error MemBackendImpl::DoomEntriesSince(Time initial_time,
236                                             CompletionOnceCallback callback) {
237   return DoomEntriesBetween(initial_time, Time::Max(), std::move(callback));
238 }
239 
CalculateSizeOfAllEntries(Int64CompletionOnceCallback callback)240 int64_t MemBackendImpl::CalculateSizeOfAllEntries(
241     Int64CompletionOnceCallback callback) {
242   return current_size_;
243 }
244 
CalculateSizeOfEntriesBetween(base::Time initial_time,base::Time end_time,Int64CompletionOnceCallback callback)245 int64_t MemBackendImpl::CalculateSizeOfEntriesBetween(
246     base::Time initial_time,
247     base::Time end_time,
248     Int64CompletionOnceCallback callback) {
249   if (end_time.is_null())
250     end_time = Time::Max();
251   DCHECK_GE(end_time, initial_time);
252 
253   int size = 0;
254   base::LinkNode<MemEntryImpl>* node = lru_list_.head();
255   while (node != lru_list_.end()) {
256     MemEntryImpl* entry = node->value();
257     if (entry->GetLastUsed() >= initial_time &&
258         entry->GetLastUsed() < end_time) {
259       size += entry->GetStorageSize();
260     }
261     node = node->next();
262   }
263   return size;
264 }
265 
266 class MemBackendImpl::MemIterator final : public Backend::Iterator {
267  public:
MemIterator(base::WeakPtr<MemBackendImpl> backend)268   explicit MemIterator(base::WeakPtr<MemBackendImpl> backend)
269       : backend_(backend) {}
270 
OpenNextEntry(EntryResultCallback callback)271   EntryResult OpenNextEntry(EntryResultCallback callback) override {
272     if (!backend_)
273       return EntryResult::MakeError(net::ERR_FAILED);
274 
275     if (!backend_keys_) {
276       backend_keys_ = std::make_unique<Strings>(backend_->entries_.size());
277       for (const auto& iter : backend_->entries_)
278         backend_keys_->push_back(iter.first);
279       current_ = backend_keys_->begin();
280     } else {
281       current_++;
282     }
283 
284     while (true) {
285       if (current_ == backend_keys_->end()) {
286         backend_keys_.reset();
287         return EntryResult::MakeError(net::ERR_FAILED);
288       }
289 
290       const auto& entry_iter = backend_->entries_.find(*current_);
291       if (entry_iter == backend_->entries_.end()) {
292         // The key is no longer in the cache, move on to the next key.
293         current_++;
294         continue;
295       }
296 
297       entry_iter->second->Open();
298       return EntryResult::MakeOpened(entry_iter->second);
299     }
300   }
301 
302  private:
303   using Strings = std::vector<std::string>;
304 
305   base::WeakPtr<MemBackendImpl> backend_;
306   std::unique_ptr<Strings> backend_keys_;
307   Strings::iterator current_;
308 };
309 
CreateIterator()310 std::unique_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
311   return std::make_unique<MemIterator>(weak_factory_.GetWeakPtr());
312 }
313 
OnExternalCacheHit(const std::string & key)314 void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
315   auto it = entries_.find(key);
316   if (it != entries_.end())
317     it->second->UpdateStateOnUse(MemEntryImpl::ENTRY_WAS_NOT_MODIFIED);
318 }
319 
EvictIfNeeded()320 void MemBackendImpl::EvictIfNeeded() {
321   if (current_size_ <= max_size_)
322     return;
323   int target_size = std::max(0, max_size_ - kDefaultEvictionSize);
324   EvictTill(target_size);
325 }
326 
EvictTill(int target_size)327 void MemBackendImpl::EvictTill(int target_size) {
328   base::LinkNode<MemEntryImpl>* entry = lru_list_.head();
329   while (current_size_ > target_size && entry != lru_list_.end()) {
330     MemEntryImpl* to_doom = entry->value();
331     entry = NextSkippingChildren(lru_list_, entry);
332 
333     if (!to_doom->InUse())
334       to_doom->Doom();
335   }
336 }
337 
OnMemoryPressure(base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level)338 void MemBackendImpl::OnMemoryPressure(
339     base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
340   switch (memory_pressure_level) {
341     case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
342       break;
343     case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
344       EvictTill(max_size_ / 2);
345       break;
346     case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
347       EvictTill(max_size_ / 10);
348       break;
349   }
350 }
351 
}  // namespace disk_cache