// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_net_log_parameters.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace disk_cache {
namespace {

// An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
// the cache.
const int64 kMaxSparseDataSizeDivisor = 10;

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_FAST_EMPTY_RETURN = 5,
  WRITE_RESULT_MAX = 6,
};

// Used in histograms, please only add entries at the end.
enum HeaderSizeChange {
  HEADER_SIZE_CHANGE_INITIAL,
  HEADER_SIZE_CHANGE_SAME,
  HEADER_SIZE_CHANGE_INCREASE,
  HEADER_SIZE_CHANGE_DECREASE,
  HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
  HEADER_SIZE_CHANGE_MAX
};

void RecordReadResult(net::CacheType cache_type, ReadResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadResult", cache_type, result, READ_RESULT_MAX);
}

void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteResult2", cache_type, result, WRITE_RESULT_MAX);
}

// TODO(ttuttle): Consider removing this once we have a good handle on header
// size changes.
void RecordHeaderSizeChange(net::CacheType cache_type,
                            int old_size, int new_size) {
  HeaderSizeChange size_change;

  SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);

  if (old_size == 0) {
    size_change = HEADER_SIZE_CHANGE_INITIAL;
  } else if (new_size == old_size) {
    size_change = HEADER_SIZE_CHANGE_SAME;
  } else if (new_size > old_size) {
    int delta = new_size - old_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeIncreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeIncreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_INCREASE;
  } else {  // new_size < old_size
    int delta = old_size - new_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeDecreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeDecreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_DECREASE;
  }

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   size_change, HEADER_SIZE_CHANGE_MAX);
}

void RecordUnexpectedStream0Write(net::CacheType cache_type) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX);
}

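// Number of entry objects currently open across the whole process; reported
// to the "GlobalOpenEntryCount" histogram as entries are created and closed.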
int g_open_entry_count = 0;

void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
  g_open_entry_count += offset;
  SIMPLE_CACHE_UMA(COUNTS_10000,
                   "GlobalOpenEntryCount", cache_type, g_open_entry_count);
}

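// Runs |completion_callback| only while |backend| is still alive, so that no
// callback fires after the backend has been destroyed.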
void InvokeCallbackIfBackendIsAlive(
    const base::WeakPtr<SimpleBackendImpl>& backend,
    const net::CompletionCallback& completion_callback,
    int result) {
  DCHECK(!completion_callback.is_null());
  if (!backend.get())
    return;
  completion_callback.Run(result);
}

}  // namespace

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
                                 const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      cache_type_(cache_type),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      sparse_data_size_(0),
      open_count_(0),
      doomed_(false),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
      stream_0_data_(new net::GrowableIOBuffer()) {
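  // All per-stream bookkeeping arrays must be sized to the same number of
  // streams.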
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  MakeUninitialized();
  net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
      CreateNetLogSimpleEntryConstructionCallback(this));
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);

  bool have_index = backend_->index()->initialized();
  // This enumeration is used in histograms; add entries only at the end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (have_index) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "OpenEntryIndexState", cache_type_,
                   open_entry_index_enum, INDEX_MAX);

  // If the entry is not known to the index, initiate fast failover to the
  // network.
  if (open_entry_index_enum == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        net::ERR_FAILED);
    return net::ERR_FAILED;
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, have_index, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);

  bool have_index = backend_->index()->initialized();
  int ret_value = net::ERR_FAILED;
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);

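    // Optimistic Create: hand the entry back to the caller immediately and
    // queue the actual creation with a null callback;
    // CreationOperationComplete cleans up if the creation later fails.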
    ReturnEntryToCaller(out_entry);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, callback, out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst case is an entry
  // present in the index without backing files, so we never leak files.
  // CreationOperationComplete will remove the entry from the index if the
  // creation fails.
  backend_->index()->Insert(entry_hash_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  if (doomed_)
    return net::OK;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed();
  if (backend_.get())
    backend_->OnDoomStart(entry_hash_);
  pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::SetKey(const std::string& key) {
  key_ = key;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
      net::NetLog::StringCallback("key", &key));
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      buf_len < 0) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }

    RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(0));
    }

    RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(clamy): return immediately when reading from stream 0.

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      offset < 0 || buf_len < 0) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }
  ScopedOperationRunner operation_runner(this);

  // Stream 0 data is kept in memory, so it can be written immediately if there
  // are no IO operations pending.
  if (stream_index == 0 && state_ == STATE_READY &&
      pending_operations_.size() == 0)
    return SetStream0Data(buf, offset, buf_len, truncate);

  // We can only do an optimistic write if there are no pending operations, so
  // that we are sure that the next call to RunNextOperationIfNeeded will
  // actually run the write operation that sets the stream size. It also
  // protects against previous possibly-conflicting writes that could be
  // stacked in |pending_operations_|. We could optimize this for when we have
  // only read operations enqueued.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = callback;
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
    if (buf) {
      op_buf = new IOBuffer(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionCallback();
    ret_value = buf_len;
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          CreateNetLogReadWriteCompleteCallback(buf_len));
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
                                                                stream_index,
                                                                offset,
                                                                buf_len,
                                                                op_buf.get(),
                                                                truncate,
                                                                optimistic,
                                                                op_callback));
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
      this, offset, len, start, callback));
  return net::ERR_IO_PENDING;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(ttuttle): Actually check.
  return true;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O.  Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O.  Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
  return net::OK;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
  net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
}

void SimpleEntryImpl::PostClientCallback(const CompletionCallback& callback,
                                         int result) {
  if (callback.is_null())
    return;
  // Note that the callback is posted rather than directly invoked to avoid
  // reentrancy issues.
  MessageLoopProxy::current()->PostTask(
      FROM_HERE,
      base::Bind(&InvokeCallbackIfBackendIsAlive, backend_, callback, result));
}

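// Resets all per-stream bookkeeping so the entry can be opened or created
// again from scratch.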
void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
    crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
  }
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close()
  if (!backend_.get()) {
    // This method can be called when an asynchronous operation completed.
    // If the backend no longer exists, the callback won't be invoked, and so we
    // must close ourselves to avoid leaking. As well, there's no guarantee the
    // client-provided pointer (|out_entry|) hasn't been freed, and no point
    // dereferencing it, either.
    Close();
    return;
  }
  *out_entry = this;
}

void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_.get())
    return;
  backend_->OnDeactivated(this);
}

void SimpleEntryImpl::MarkAsDoomed() {
  doomed_ = true;
  if (!backend_.get())
    return;
  backend_->index()->Remove(entry_hash_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "EntryOperationsPending", cache_type_,
                   pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    scoped_ptr<SimpleEntryOperation> operation(
        new SimpleEntryOperation(pending_operations_.front()));
    pending_operations_.pop();
    switch (operation->type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation->have_index(),
                          operation->callback(),
                          operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation->have_index(),
                            operation->callback(),
                            operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        RecordReadIsParallelizable(*operation);
        ReadDataInternal(operation->index(),
                         operation->offset(),
                         operation->buf(),
                         operation->length(),
                         operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(),
                          operation->offset(),
                          operation->buf(),
                          operation->length(),
                          operation->callback(),
                          operation->truncate());
        break;
      case SimpleEntryOperation::TYPE_READ_SPARSE:
        ReadSparseDataInternal(operation->sparse_offset(),
                               operation->buf(),
                               operation->length(),
                               operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE_SPARSE:
        WriteSparseDataInternal(operation->sparse_offset(),
                                operation->buf(),
                                operation->length(),
                                operation->callback());
        break;
      case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
        GetAvailableRangeInternal(operation->sparse_offset(),
                                  operation->length(),
                                  operation->out_start(),
                                  operation->callback());
        break;
      case SimpleEntryOperation::TYPE_DOOM:
        DoomEntryInternal(operation->callback());
        break;
      default:
        NOTREACHED();
    }
    // The operation is kept for histograms. Make sure it does not leak
    // resources.
    executing_operation_.swap(operation);
    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(bool have_index,
                                        const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    PostClientCallback(callback, net::OK);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::OK));
    return;
  }
  if (state_ == STATE_FAILURE) {
    PostClientCallback(callback, net::ERR_FAILED);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
                            cache_type_,
                            path_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(bool have_index,
                                          const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    PostClientCallback(callback, net::ERR_FAILED);
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryStreamCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
                            cache_type_,
                            path_,
                            key_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
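    // For each stream written during this open, emit a valid CRC record only
    // if the running checksum covers the whole stream; otherwise record that
    // no CRC is available.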
    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task =
        base::Bind(&SimpleSynchronousEntry::Close,
                   base::Unretained(synchronous_entry_),
                   SimpleEntryStat(last_used_, last_modified_, data_size_,
                                   sparse_data_size_),
                   base::Passed(&crc32s_to_write),
                   stream_0_data_);
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (!have_written_[i]) {
        SIMPLE_CACHE_UMA(ENUMERATION,
                         "CheckCRCResult", cache_type_,
                         crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
      // Note that the API states that client-provided callbacks for entry-level
      // (i.e. non-backend) operations (e.g. read, write) are invoked even if
      // the backend was already destroyed.
      MessageLoopProxy::current()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0));
    return;
  }

  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  // Since stream 0 data is kept in memory, it is read immediately.
  if (stream_index == 0) {
    int ret_value = ReadStream0Data(buf, offset, buf_len);
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE,
                                            base::Bind(callback, ret_value));
    }
    return;
  }

  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

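  // |read_crc32| receives the CRC of just the bytes read by this operation;
  // ReadOperationComplete folds it into the running checksum for the stream.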
  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  Closure task = base::Bind(
      &SimpleSynchronousEntry::ReadData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
      make_scoped_refptr(buf),
      read_crc32.get(),
      entry_stat.get(),
      result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
                             this,
                             stream_index,
                             offset,
                             callback,
                             base::Passed(&read_crc32),
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);

  // Since stream 0 data is kept in memory, it will be written immediately.
  if (stream_index == 0) {
    int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE,
                                            base::Bind(callback, ret_value));
    }
    return;
  }

  // Ignore zero-length writes that do not change the file size.
  if (buf_len == 0) {
    int32 data_size = data_size_[stream_index];
    if (truncate ? (offset == data_size) : (offset <= data_size)) {
      RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN);
      if (!callback.is_null()) {
        MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
            callback, 0));
      }
      return;
    }
  }
  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

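  // Update the running CRC state for this stream before the write is queued.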
  AdvanceCrc(buf, offset, buf_len, stream_index);

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;
  // Writing on stream 1 affects the placement of stream 0 in the file, so the
  // EOF record will have to be rewritten.
  if (stream_index == 1)
    have_written_[0] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                stream_index, offset, buf_len, truncate,
                                doomed_),
                            make_scoped_refptr(buf),
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
                             this,
                             stream_index,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::ReadSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  scoped_ptr<base::Time> last_used(new base::Time());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            last_used.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&last_used),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

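  // Cap sparse data to a fraction (1 / kMaxSparseDataSizeDivisor) of the
  // total cache size; with no backend there is no limit.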
  int64 max_sparse_data_size = kint64max;
  if (backend_.get()) {
    int64 max_cache_size = backend_->index()->max_size();
    max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
  }

  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));

  last_used_ = last_modified_ = base::Time::Now();

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            max_sparse_data_size,
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::GetAvailableRangeInternal(
    int64 sparse_offset,
    int len,
    int64* out_start,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, len),
                            out_start,
                            result.get());
  Closure reply = base::Bind(
      &SimpleEntryImpl::GetAvailableRangeOperationComplete,
      this,
      callback,
      base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
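  // The current |state_| is bound into the reply so DoomOperationComplete can
  // restore it once the doom finishes; until then the entry is IO-pending.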
  PostTaskAndReplyWithResult(
      worker_pool_, FROM_HERE,
      base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_),
      base::Bind(&SimpleEntryImpl::DoomOperationComplete, this, callback,
                 state_));
  state_ = STATE_IO_PENDING;
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleEntryCreationResults> in_results,
    Entry** out_entry,
    net::NetLog::EventType end_event_type) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);
  ScopedOperationRunner operation_runner(this);
  SIMPLE_CACHE_UMA(BOOLEAN,
                   "EntryCreationResult", cache_type_,
                   in_results->result == net::OK);
  if (in_results->result != net::OK) {
    if (in_results->result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
    PostClientCallback(completion_callback, net::ERR_FAILED);
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;
  if (in_results->stream_0_data) {
    stream_0_data_ = in_results->stream_0_data;
    // The crc was read in SimpleSynchronousEntry.
    crc_check_state_[0] = CRC_CHECK_DONE;
    crc32s_[0] = in_results->stream_0_crc32;
    crc32s_end_offset_[0] = in_results->entry_stat.data_size(0);
  }
  if (key_.empty()) {
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  UpdateDataFromEntryStat(in_results->entry_stat);
  SIMPLE_CACHE_UMA(TIMES,
                   "EntryCreationTime", cache_type_,
                   (base::TimeTicks::Now() - start_time));
  AdjustOpenEntryCountBy(cache_type_, 1);

  net_log_.AddEvent(end_event_type);
  PostClientCallback(completion_callback, net::OK);
}

void SimpleEntryImpl::EntryOperationComplete(
    const CompletionCallback& completion_callback,
    const SimpleEntryStat& entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
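  // A negative result from the synchronous entry is fatal: the entry is
  // doomed and put into the failure state.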
  if (*result < 0) {
    state_ = STATE_FAILURE;
    MarkAsDoomed();
  } else {
    state_ = STATE_READY;
    UpdateDataFromEntryStat(entry_stat);
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 &&
      crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
    crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
  }

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a crc of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the crc as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.

      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index,
                                *entry_stat,
                                crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  if (*result < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

WriteOperationComplete(int stream_index,const CompletionCallback & completion_callback,scoped_ptr<SimpleEntryStat> entry_stat,scoped_ptr<int> result)1238 void SimpleEntryImpl::WriteOperationComplete(
1239     int stream_index,
1240     const CompletionCallback& completion_callback,
1241     scoped_ptr<SimpleEntryStat> entry_stat,
1242     scoped_ptr<int> result) {
1243   if (*result >= 0)
1244     RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1245   else
1246     RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
1247   if (net_log_.IsLogging()) {
1248     net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
1249         CreateNetLogReadWriteCompleteCallback(*result));
1250   }
1251 
1252   if (*result < 0) {
1253     crc32s_end_offset_[stream_index] = 0;
1254   }
1255 
1256   EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1257 }
1258 
ReadSparseOperationComplete(const CompletionCallback & completion_callback,scoped_ptr<base::Time> last_used,scoped_ptr<int> result)1259 void SimpleEntryImpl::ReadSparseOperationComplete(
1260     const CompletionCallback& completion_callback,
1261     scoped_ptr<base::Time> last_used,
1262     scoped_ptr<int> result) {
1263   DCHECK(io_thread_checker_.CalledOnValidThread());
1264   DCHECK(synchronous_entry_);
1265   DCHECK(result);
1266 
1267   SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
1268                              sparse_data_size_);
1269   EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1270 }
1271 
WriteSparseOperationComplete(const CompletionCallback & completion_callback,scoped_ptr<SimpleEntryStat> entry_stat,scoped_ptr<int> result)1272 void SimpleEntryImpl::WriteSparseOperationComplete(
1273     const CompletionCallback& completion_callback,
1274     scoped_ptr<SimpleEntryStat> entry_stat,
1275     scoped_ptr<int> result) {
1276   DCHECK(io_thread_checker_.CalledOnValidThread());
1277   DCHECK(synchronous_entry_);
1278   DCHECK(result);
1279 
1280   EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1281 }
1282 
void SimpleEntryImpl::GetAvailableRangeOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::DoomOperationComplete(
    const CompletionCallback& callback,
    State state_to_restore,
    int result) {
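  // Restore the state the entry was in before the doom was queued, then let
  // any queued operations proceed and tell the backend the doom finished.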
  state_ = state_to_restore;
  if (!callback.is_null())
    callback.Run(result);
  RunNextOperationIfNeeded();
  if (backend_)
    backend_->OnDoomComplete(entry_hash_);
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsLogging()) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
        *result);
  }

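  // If the checksum validated, report the outcome of the original read;
  // otherwise report the checksum failure itself.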
  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    else
      RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
  AdjustOpenEntryCountBy(cache_type_, -1);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used();
  last_modified_ = entry_stat.last_modified();
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    data_size_[i] = entry_stat.data_size(i);
  }
  sparse_data_size_ = entry_stat.sparse_data_size();
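  // Keep the index's size bookkeeping in sync. Doomed entries are skipped,
  // since they are no longer tracked by the index.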
  if (!doomed_ && backend_.get())
    backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
}

int64 SimpleEntryImpl::GetDiskUsage() const {
  int64 file_size = 0;
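  // Each stream's on-disk file size is derived from the key and the data
  // size; the file stores the key and format headers in addition to the
  // payload.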
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    file_size +=
        simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
  }
  file_size += sparse_data_size_;
  return file_size;
}

void SimpleEntryImpl::RecordReadIsParallelizable(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum ReadDependencyType {
    // READ_STANDALONE = 0, Deprecated.
    READ_FOLLOWS_READ = 1,
    READ_FOLLOWS_CONFLICTING_WRITE = 2,
    READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
    READ_FOLLOWS_OTHER = 4,
    READ_ALONE_IN_QUEUE = 5,
    READ_DEPENDENCY_TYPE_MAX = 6,
  };

  ReadDependencyType type = READ_FOLLOWS_OTHER;
  if (operation.alone_in_queue()) {
    type = READ_ALONE_IN_QUEUE;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
    type = READ_FOLLOWS_READ;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    if (executing_operation_->ConflictsWith(operation))
      type = READ_FOLLOWS_CONFLICTING_WRITE;
    else
      type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadIsParallelizable", cache_type_,
                   type, READ_DEPENDENCY_TYPE_MAX);
}

void SimpleEntryImpl::RecordWriteDependencyType(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum WriteDependencyType {
    WRITE_OPTIMISTIC = 0,
    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
    WRITE_FOLLOWS_CONFLICTING_READ = 5,
    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
    WRITE_FOLLOWS_OTHER = 7,
    WRITE_DEPENDENCY_TYPE_MAX = 8,
  };

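  // Classify this write by how it relates to the currently executing
  // operation: optimistic writes are counted separately; other writes are
  // bucketed by the in-flight operation's type and whether the two
  // operations conflict.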
  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
  if (operation.optimistic()) {
    type = WRITE_OPTIMISTIC;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    bool conflicting = executing_operation_->ConflictsWith(operation);

    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
    } else if (executing_operation_->optimistic()) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
    } else {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
    }
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteDependencyType", cache_type_,
                   type, WRITE_DEPENDENCY_TYPE_MAX);
}

int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
                                     int offset,
                                     int buf_len) {
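  // Stream 0 is kept entirely in memory, so this read is served synchronously
  // from |stream_0_data_|.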
  if (buf_len < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
    return 0;
  }
  memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
  UpdateDataFromEntryStat(
      SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
                      sparse_data_size_));
  RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
  return buf_len;
}

int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
                                    int offset,
                                    int buf_len,
                                    bool truncate) {
  // Currently, stream 0 is only used for HTTP headers, which are always
  // written with a single, truncating write. Detect such writes and record
  // the resulting size changes of the headers. Writes to stream 0 with other
  // access patterns are also supported, as the API contract requires.
  // All other clients of the Simple Cache are encouraged to use stream 1.
  have_written_[0] = true;
  int data_size = GetDataSize(0);
  if (offset == 0 && truncate) {
    RecordHeaderSizeChange(cache_type_, data_size, buf_len);
    stream_0_data_->SetCapacity(buf_len);
    memcpy(stream_0_data_->data(), buf->data(), buf_len);
    data_size_[0] = buf_len;
  } else {
    RecordUnexpectedStream0Write(cache_type_);
    const int buffer_size =
        truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
    stream_0_data_->SetCapacity(buffer_size);
    // If |stream_0_data_| was extended, the gap between the old end of data
    // and |offset| needs to be zero-filled.
    const int fill_size = offset <= data_size ? 0 : offset - data_size;
    if (fill_size > 0)
      memset(stream_0_data_->data() + data_size, 0, fill_size);
    if (buf)
      memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
    data_size_[0] = buffer_size;
  }
  base::Time modification_time = base::Time::Now();
  AdvanceCrc(buf, offset, buf_len, 0);
  UpdateDataFromEntryStat(
      SimpleEntryStat(modification_time, modification_time, data_size_,
                      sparse_data_size_));
  RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  return buf_len;
}

void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer,
                                 int offset,
                                 int length,
                                 int stream_index) {
  // It is easy to incrementally compute the CRC over [0 .. |offset + length|)
  // if |offset == 0| or we have already computed the CRC over [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the CRC of the data. When we write to an entry and close it
  // without having done a sequential write, we do not check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc =
        (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
    if (length > 0) {
      crc32s_[stream_index] = crc32(
          initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length);
    }
    crc32s_end_offset_[stream_index] = offset + length;
  } else if (offset < crc32s_end_offset_[stream_index]) {
    // If a range whose crc32 was already computed is rewritten, the
    // computation of the crc32 needs to start from 0 again.
    crc32s_end_offset_[stream_index] = 0;
  }
}

}  // namespace disk_cache