// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_backend_impl.h"

#include <algorithm>
#include <cstdlib>
#include <functional>

#if defined(OS_POSIX)
#include <sys/resource.h>
#endif

#include "base/bind.h"
#include "base/callback.h"
#include "base/files/file_util.h"
#include "base/location.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/sparse_histogram.h"
#include "base/single_thread_task_runner.h"
#include "base/sys_info.h"
#include "base/task_runner_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/simple/simple_version_upgrade.h"

using base::Callback;
using base::Closure;
using base::FilePath;
using base::SequencedWorkerPool;
using base::Time;
using base::DirectoryExists;
using base::CreateDirectory;

namespace disk_cache {

namespace {

// Maximum number of concurrent worker pool threads, which also is the limit
// on concurrent IO (as we use one thread per IO request).
const int kDefaultMaxWorkerThreads = 50;

const char kThreadNamePrefix[] = "SimpleCache";

// An individual entry may use at most 1/kMaxFileRatio of the total cache size.
const int kMaxFileRatio = 8;

// A global sequenced worker pool to use for launching all tasks.
SequencedWorkerPool* g_sequenced_worker_pool = NULL;

void MaybeCreateSequencedWorkerPool() {
  if (!g_sequenced_worker_pool) {
    int max_worker_threads = kDefaultMaxWorkerThreads;

    const std::string thread_count_field_trial =
        base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
    if (!thread_count_field_trial.empty()) {
      max_worker_threads =
          std::max(1, std::atoi(thread_count_field_trial.c_str()));
    }

    g_sequenced_worker_pool = new SequencedWorkerPool(max_worker_threads,
                                                      kThreadNamePrefix);
    g_sequenced_worker_pool->AddRef();  // Leak it.
  }
}

bool g_fd_limit_histogram_has_been_populated = false;

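// Records the process's open-file-descriptor soft and hard limits to UMA, at
// most once per process (POSIX only; reported as unsupported elsewhere).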
void MaybeHistogramFdLimit(net::CacheType cache_type) {
  if (g_fd_limit_histogram_has_been_populated)
    return;

  // Used in histograms; add new entries at end.
  enum FdLimitStatus {
    FD_LIMIT_STATUS_UNSUPPORTED = 0,
    FD_LIMIT_STATUS_FAILED      = 1,
    FD_LIMIT_STATUS_SUCCEEDED   = 2,
    FD_LIMIT_STATUS_MAX         = 3
  };
  FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
  int soft_fd_limit = 0;
  int hard_fd_limit = 0;

#if defined(OS_POSIX)
  struct rlimit nofile;
  if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
    soft_fd_limit = nofile.rlim_cur;
    hard_fd_limit = nofile.rlim_max;
    fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
  } else {
    fd_limit_status = FD_LIMIT_STATUS_FAILED;
  }
#endif

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "FileDescriptorLimitStatus", cache_type,
                   fd_limit_status, FD_LIMIT_STATUS_MAX);
  if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitSoft", cache_type, soft_fd_limit);
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitHard", cache_type, hard_fd_limit);
  }

  g_fd_limit_histogram_has_been_populated = true;
}

// Detects whether the files in the cache directory match the current disk
// cache backend type and version. If the directory contains no cache, creates
// the fresh structure in it.
bool FileStructureConsistent(const base::FilePath& path) {
  if (!base::PathExists(path) && !base::CreateDirectory(path)) {
    LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
    return false;
  }
  return disk_cache::UpgradeSimpleCacheOnDisk(path);
}

// A context used by a BarrierCompletionCallback to track state.
struct BarrierContext {
  BarrierContext(int expected)
      : expected(expected),
        count(0),
        had_error(false) {}

  const int expected;
  int count;
  bool had_error;
};

void BarrierCompletionCallbackImpl(
    BarrierContext* context,
    const net::CompletionCallback& final_callback,
    int result) {
  DCHECK_GT(context->expected, context->count);
  if (context->had_error)
    return;
  if (result != net::OK) {
    context->had_error = true;
    final_callback.Run(result);
    return;
  }
  ++context->count;
  if (context->count == context->expected)
    final_callback.Run(net::OK);
}

// A barrier completion callback is a net::CompletionCallback that waits for
// |count| successful results before invoking |final_callback|. In the case of
// an error, the first error is passed to |final_callback| and all others
// are ignored.
net::CompletionCallback MakeBarrierCompletionCallback(
    int count,
    const net::CompletionCallback& final_callback) {
  BarrierContext* context = new BarrierContext(count);
  return base::Bind(&BarrierCompletionCallbackImpl,
                    base::Owned(context), final_callback);
}

// A short bindable thunk that ensures a completion callback is always called
// after running an operation asynchronously.
void RunOperationAndCallback(
    const Callback<int(const net::CompletionCallback&)>& operation,
    const net::CompletionCallback& operation_callback) {
  const int operation_result = operation.Run(operation_callback);
  if (operation_result != net::ERR_IO_PENDING)
    operation_callback.Run(operation_result);
}

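// Records the time from |constructed_since| until the index finished loading,
// in a success or failure histogram depending on |result|.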
void RecordIndexLoad(net::CacheType cache_type,
                     base::TimeTicks constructed_since,
                     int result) {
  const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
                                            constructed_since;
  if (result == net::OK) {
    SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
  } else {
    SIMPLE_CACHE_UMA(TIMES,
                     "CreationToIndexFail", cache_type, creation_to_index);
  }
}

}  // namespace

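// Handed to each active SimpleEntryImpl; its destructor removes the entry's
// hash from |active_entries_| (if the backend is still alive), so the map only
// tracks live entries.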
class SimpleBackendImpl::ActiveEntryProxy
    : public SimpleEntryImpl::ActiveEntryProxy {
 public:
  virtual ~ActiveEntryProxy() {
    if (backend_) {
      DCHECK_EQ(1U, backend_->active_entries_.count(entry_hash_));
      backend_->active_entries_.erase(entry_hash_);
    }
  }

  static scoped_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
      int64 entry_hash,
      SimpleBackendImpl* backend) {
    scoped_ptr<SimpleEntryImpl::ActiveEntryProxy>
        proxy(new ActiveEntryProxy(entry_hash, backend));
    return proxy.Pass();
  }

 private:
  ActiveEntryProxy(uint64 entry_hash,
                   SimpleBackendImpl* backend)
      : entry_hash_(entry_hash),
        backend_(backend->AsWeakPtr()) {}

  uint64 entry_hash_;
  base::WeakPtr<SimpleBackendImpl> backend_;
};

SimpleBackendImpl::SimpleBackendImpl(
    const FilePath& path,
    int max_bytes,
    net::CacheType cache_type,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::NetLog* net_log)
    : path_(path),
      cache_type_(cache_type),
      cache_thread_(cache_thread),
      orig_max_size_(max_bytes),
      entry_operations_mode_(cache_type == net::DISK_CACHE ?
                                 SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
                                 SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
      net_log_(net_log) {
  MaybeHistogramFdLimit(cache_type_);
}

SimpleBackendImpl::~SimpleBackendImpl() {
  index_->WriteToDisk();
}

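// Initialization is asynchronous: the index is created immediately, while the
// on-disk structure is checked (and the effective maximum size computed) on
// |cache_thread_|; the result is delivered to InitializeIndex().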
int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
  MaybeCreateSequencedWorkerPool();

  worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior(
      SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);

  index_.reset(new SimpleIndex(
      base::ThreadTaskRunnerHandle::Get(),
      this,
      cache_type_,
      make_scoped_ptr(new SimpleIndexFile(
          cache_thread_, worker_pool_.get(), cache_type_, path_))));
  index_->ExecuteWhenReady(
      base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));

  PostTaskAndReplyWithResult(
      cache_thread_.get(),
      FROM_HERE,
      base::Bind(
          &SimpleBackendImpl::InitCacheStructureOnDisk, path_, orig_max_size_),
      base::Bind(&SimpleBackendImpl::InitializeIndex,
                 AsWeakPtr(),
                 completion_callback));
  return net::ERR_IO_PENDING;
}

bool SimpleBackendImpl::SetMaxSize(int max_bytes) {
  orig_max_size_ = max_bytes;
  return index_->SetMaxSize(max_bytes);
}

int SimpleBackendImpl::GetMaxFileSize() const {
  return index_->max_size() / kMaxFileRatio;
}

void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) {
  DCHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
  entries_pending_doom_.insert(
      std::make_pair(entry_hash, std::vector<Closure>()));
}

void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) {
  DCHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  std::vector<Closure> to_run_closures;
  to_run_closures.swap(it->second);
  entries_pending_doom_.erase(it);

  std::for_each(to_run_closures.begin(), to_run_closures.end(),
                std::mem_fun_ref(&Closure::Run));
}

void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
                                    const net::CompletionCallback& callback) {
  scoped_ptr<std::vector<uint64> >
      mass_doom_entry_hashes(new std::vector<uint64>());
  mass_doom_entry_hashes->swap(*entry_hashes);

  std::vector<uint64> to_doom_individually_hashes;

  // For each of the entry hashes, there are two cases:
  // 1. The entry is either open or pending doom, and so it should be doomed
  //    individually to avoid flakes.
  // 2. The entry is not in use at all, so we can call
  //    SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
  for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
    const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
    DCHECK(active_entries_.count(entry_hash) == 0 ||
           entries_pending_doom_.count(entry_hash) == 0);
    if (!active_entries_.count(entry_hash) &&
        !entries_pending_doom_.count(entry_hash)) {
      continue;
    }

    to_doom_individually_hashes.push_back(entry_hash);

    (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
    mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
  }

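  // The barrier needs one success per individually doomed entry, plus one for
  // the mass doom of the remaining hashes (reported via DoomEntriesComplete).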
  net::CompletionCallback barrier_callback =
      MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
                                    callback);
  for (std::vector<uint64>::const_iterator
           it = to_doom_individually_hashes.begin(),
           end = to_doom_individually_hashes.end(); it != end; ++it) {
    const int doom_result = DoomEntryFromHash(*it, barrier_callback);
    DCHECK_EQ(net::ERR_IO_PENDING, doom_result);
    index_->Remove(*it);
  }

  for (std::vector<uint64>::const_iterator it = mass_doom_entry_hashes->begin(),
                                           end = mass_doom_entry_hashes->end();
       it != end; ++it) {
    index_->Remove(*it);
    OnDoomStart(*it);
  }

  // Taking this pointer here avoids undefined behaviour from calling
  // base::Passed before mass_doom_entry_hashes.get().
  std::vector<uint64>* mass_doom_entry_hashes_ptr =
      mass_doom_entry_hashes.get();
  PostTaskAndReplyWithResult(worker_pool_.get(),
                             FROM_HERE,
                             base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
                                        mass_doom_entry_hashes_ptr,
                                        path_),
                             base::Bind(&SimpleBackendImpl::DoomEntriesComplete,
                                        AsWeakPtr(),
                                        base::Passed(&mass_doom_entry_hashes),
                                        barrier_callback));
}

net::CacheType SimpleBackendImpl::GetCacheType() const {
  return net::DISK_CACHE;
}

int32 SimpleBackendImpl::GetEntryCount() const {
  // TODO(pasko): Use directory file count when index is not ready.
  return index_->GetEntryCount();
}

int SimpleBackendImpl::OpenEntry(const std::string& key,
                                 Entry** entry,
                                 const CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  // TODO(gavinp): Factor out this (not quite completely) repetitive code
  // block from OpenEntry/CreateEntry/DoomEntry.
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
                 AsWeakPtr(),
                 key,
                 entry,
                 simple_entry,
                 callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}

int SimpleBackendImpl::CreateEntry(const std::string& key,
                                   Entry** entry,
                                   const CompletionCallback& callback) {
  DCHECK_LT(0u, key.size());
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::CreateEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->CreateEntry(entry, callback);
}

int SimpleBackendImpl::DoomEntry(const std::string& key,
                                 const net::CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntry, base::Unretained(this), key);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->DoomEntry(callback);
}

int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  return DoomEntriesBetween(Time(), Time(), callback);
}

void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
                                          Time end_time,
                                          const CompletionCallback& callback,
                                          int result) {
  if (result != net::OK) {
    callback.Run(result);
    return;
  }
  scoped_ptr<std::vector<uint64> > removed_key_hashes(
      index_->GetEntriesBetween(initial_time, end_time).release());
  DoomEntries(removed_key_hashes.get(), callback);
}

int SimpleBackendImpl::DoomEntriesBetween(
    const Time initial_time,
    const Time end_time,
    const CompletionCallback& callback) {
  return index_->ExecuteWhenReady(
      base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
                 initial_time, end_time, callback));
}

int SimpleBackendImpl::DoomEntriesSince(
    const Time initial_time,
    const CompletionCallback& callback) {
  return DoomEntriesBetween(initial_time, Time(), callback);
}

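// Iterates over the cache by taking a snapshot of the index's hashes and
// opening the corresponding entries one at a time, skipping hashes that have
// since disappeared from the index.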
class SimpleBackendImpl::SimpleIterator FINAL : public Iterator {
 public:
  explicit SimpleIterator(base::WeakPtr<SimpleBackendImpl> backend)
      : backend_(backend),
        weak_factory_(this) {
  }

  // From Backend::Iterator:
  virtual int OpenNextEntry(Entry** next_entry,
                            const CompletionCallback& callback) OVERRIDE {
    CompletionCallback open_next_entry_impl =
        base::Bind(&SimpleIterator::OpenNextEntryImpl,
                   weak_factory_.GetWeakPtr(), next_entry, callback);
    return backend_->index_->ExecuteWhenReady(open_next_entry_impl);
  }

  void OpenNextEntryImpl(Entry** next_entry,
                         const CompletionCallback& callback,
                         int index_initialization_error_code) {
    if (!backend_) {
      callback.Run(net::ERR_FAILED);
      return;
    }
    if (index_initialization_error_code != net::OK) {
      callback.Run(index_initialization_error_code);
      return;
    }
    if (!hashes_to_enumerate_)
      hashes_to_enumerate_ = backend_->index()->GetAllHashes().Pass();

    while (!hashes_to_enumerate_->empty()) {
      uint64 entry_hash = hashes_to_enumerate_->back();
      hashes_to_enumerate_->pop_back();
      if (backend_->index()->Has(entry_hash)) {
        *next_entry = NULL;
        CompletionCallback continue_iteration = base::Bind(
            &SimpleIterator::CheckIterationReturnValue,
            weak_factory_.GetWeakPtr(),
            next_entry,
            callback);
        int error_code_open = backend_->OpenEntryFromHash(entry_hash,
                                                          next_entry,
                                                          continue_iteration);
        if (error_code_open == net::ERR_IO_PENDING)
          return;
        if (error_code_open != net::ERR_FAILED) {
          callback.Run(error_code_open);
          return;
        }
      }
    }
    callback.Run(net::ERR_FAILED);
  }

  void CheckIterationReturnValue(Entry** entry,
                                 const CompletionCallback& callback,
                                 int error_code) {
    if (error_code == net::ERR_FAILED) {
      OpenNextEntry(entry, callback);
      return;
    }
    callback.Run(error_code);
  }

 private:
  base::WeakPtr<SimpleBackendImpl> backend_;
  scoped_ptr<std::vector<uint64> > hashes_to_enumerate_;
  base::WeakPtrFactory<SimpleIterator> weak_factory_;
};

scoped_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() {
  return scoped_ptr<Iterator>(new SimpleIterator(AsWeakPtr()));
}

void SimpleBackendImpl::GetStats(
    std::vector<std::pair<std::string, std::string> >* stats) {
  std::pair<std::string, std::string> item;
  item.first = "Cache type";
  item.second = "Simple Cache";
  stats->push_back(item);
}

void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
  index_->UseIfExists(simple_util::GetEntryHashKey(key));
}

void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
                                        const DiskStatResult& result) {
  if (result.net_error == net::OK) {
    index_->SetMaxSize(result.max_size);
    index_->Initialize(result.cache_dir_mtime);
  }
  callback.Run(result.net_error);
}

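// Runs on |cache_thread_|. Verifies (or creates and upgrades) the on-disk
// cache structure, reads the cache directory mtime, and picks a maximum cache
// size when none was suggested.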
SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
    const base::FilePath& path,
    uint64 suggested_max_size) {
  DiskStatResult result;
  result.max_size = suggested_max_size;
  result.net_error = net::OK;
  if (!FileStructureConsistent(path)) {
    LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
               << path.LossyDisplayName();
    result.net_error = net::ERR_FAILED;
  } else {
    bool mtime_result =
        disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
    DCHECK(mtime_result);
    if (!result.max_size) {
      int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
      result.max_size = disk_cache::PreferredCacheSize(available);
    }
    DCHECK(result.max_size);
  }
  return result;
}

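// Returns the active entry for |entry_hash|, creating and registering a new
// one if necessary. On a hash collision with a different key, the existing
// entry is doomed and the lookup is retried.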
scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
    const uint64 entry_hash,
    const std::string& key) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(entry_hash, NULL));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    SimpleEntryImpl* entry = it->second =
        new SimpleEntryImpl(cache_type_, path_, entry_hash,
                            entry_operations_mode_, this, net_log_);
    entry->SetKey(key);
    entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
  }
  DCHECK(it->second);
  // It's possible, but unlikely, that we have an entry hash collision with a
  // currently active entry.
  if (key != it->second->key()) {
    it->second->Doom();
    DCHECK_EQ(0U, active_entries_.count(entry_hash));
    return CreateOrFindActiveEntry(entry_hash, key);
  }
  return make_scoped_refptr(it->second);
}

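// Opens the entry identified only by |entry_hash| (e.g. during iteration). The
// operation is deferred while a doom for that hash is pending; if the entry is
// already active, it is opened by key instead.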
int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntryFromHash,
                   base::Unretained(this), entry_hash, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }

  EntryMap::iterator has_active = active_entries_.find(entry_hash);
  if (has_active != active_entries_.end()) {
    return OpenEntry(has_active->second->key(), entry, callback);
  }

  scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
      cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
                 AsWeakPtr(), entry_hash, entry, simple_entry, callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}

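// Dooms the entry identified by |entry_hash|. Defers while another doom for
// the same hash is pending, dooms through the active entry if one exists, and
// otherwise falls back to a single-element DoomEntries() call.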
int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash,
                                         const CompletionCallback& callback) {
  Entry** entry = new Entry*();
  scoped_ptr<Entry*> scoped_entry(entry);

  base::hash_map<uint64, std::vector<Closure> >::iterator pending_it =
      entries_pending_doom_.find(entry_hash);
  if (pending_it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntryFromHash,
                   base::Unretained(this), entry_hash);
    pending_it->second.push_back(base::Bind(&RunOperationAndCallback,
                                            operation, callback));
    return net::ERR_IO_PENDING;
  }

  EntryMap::iterator active_it = active_entries_.find(entry_hash);
  if (active_it != active_entries_.end())
    return active_it->second->DoomEntry(callback);

  // There are no pending dooms, nor any open entry. We can make a trivial
  // call to DoomEntries() to delete this entry.
  std::vector<uint64> entry_hash_vector;
  entry_hash_vector.push_back(entry_hash);
  DoomEntries(&entry_hash_vector, callback);
  return net::ERR_IO_PENDING;
}

void SimpleBackendImpl::OnEntryOpenedFromHash(
    uint64 hash,
    Entry** entry,
    const scoped_refptr<SimpleEntryImpl>& simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }
  DCHECK(*entry);
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(hash, simple_entry.get()));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    // There was no active entry corresponding to this hash. We've already put
    // the entry opened from hash in the |active_entries_|. We now provide the
    // proxy object to the entry.
    it->second->SetActiveEntryProxy(ActiveEntryProxy::Create(hash, this));
    callback.Run(net::OK);
  } else {
    // The entry was made active while we were waiting for the open from hash
    // to finish. The entry created from hash needs to be closed, and the one
    // in |active_entries_| can be returned to the caller.
    simple_entry->Close();
    it->second->OpenEntry(entry, callback);
  }
}

void SimpleBackendImpl::OnEntryOpenedFromKey(
    const std::string key,
    Entry** entry,
    const scoped_refptr<SimpleEntryImpl>& simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  int final_code = error_code;
  if (final_code == net::OK) {
    bool key_matches = key.compare(simple_entry->key()) == 0;
    if (!key_matches) {
      // TODO(clamy): Add a unit test to check this code path.
      DLOG(WARNING) << "Key mismatch on open.";
      simple_entry->Doom();
      simple_entry->Close();
      final_code = net::ERR_FAILED;
    } else {
      DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
    }
    SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
  }
  callback.Run(final_code);
}

void SimpleBackendImpl::DoomEntriesComplete(
    scoped_ptr<std::vector<uint64> > entry_hashes,
    const net::CompletionCallback& callback,
    int result) {
  std::for_each(
      entry_hashes->begin(), entry_hashes->end(),
      std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete),
                   this));
  callback.Run(result);
}

void SimpleBackendImpl::FlushWorkerPoolForTesting() {
  if (g_sequenced_worker_pool)
    g_sequenced_worker_pool->FlushForTesting();
}

}  // namespace disk_cache