1 // Copyright 2013 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/disk_cache/simple/simple_entry_impl.h"
6
7 #include <algorithm>
8 #include <cstring>
9 #include <limits>
10 #include <utility>
11 #include <vector>
12 #include <zlib.h>
13
14 #include "base/check_op.h"
15 #include "base/cxx17_backports.h"
16 #include "base/functional/bind.h"
17 #include "base/functional/callback.h"
18 #include "base/functional/callback_helpers.h"
19 #include "base/location.h"
20 #include "base/memory/raw_ptr.h"
21 #include "base/notreached.h"
22 #include "base/task/sequenced_task_runner.h"
23 #include "base/task/task_runner.h"
24 #include "base/time/time.h"
25 #include "base/trace_event/memory_usage_estimator.h"
26 #include "net/base/io_buffer.h"
27 #include "net/base/net_errors.h"
28 #include "net/base/prioritized_task_runner.h"
29 #include "net/disk_cache/backend_cleanup_tracker.h"
30 #include "net/disk_cache/net_log_parameters.h"
31 #include "net/disk_cache/simple/simple_backend_impl.h"
32 #include "net/disk_cache/simple/simple_histogram_enums.h"
33 #include "net/disk_cache/simple/simple_histogram_macros.h"
34 #include "net/disk_cache/simple/simple_index.h"
35 #include "net/disk_cache/simple/simple_net_log_parameters.h"
36 #include "net/disk_cache/simple/simple_synchronous_entry.h"
37 #include "net/disk_cache/simple/simple_util.h"
38 #include "net/log/net_log.h"
39 #include "net/log/net_log_source_type.h"
40
41 namespace disk_cache {
42 namespace {
43
44 // An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
45 // the cache.
46 const int64_t kMaxSparseDataSizeDivisor = 10;
47
48 OpenEntryIndexEnum ComputeIndexState(SimpleBackendImpl* backend,
49 uint64_t entry_hash) {
50 if (!backend->index()->initialized())
51 return INDEX_NOEXIST;
52 if (backend->index()->Has(entry_hash))
53 return INDEX_HIT;
54 return INDEX_MISS;
55 }
56
57 void RecordOpenEntryIndexState(net::CacheType cache_type,
58 OpenEntryIndexEnum state) {
59 SIMPLE_CACHE_UMA(ENUMERATION, "OpenEntryIndexState", cache_type, state,
60 INDEX_MAX);
61 }
62
63 void RecordHeaderSize(net::CacheType cache_type, int size) {
64 SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, size);
65 }
66
67 void InvokeCallbackIfBackendIsAlive(
68 const base::WeakPtr<SimpleBackendImpl>& backend,
69 net::CompletionOnceCallback completion_callback,
70 int result) {
71 DCHECK(!completion_callback.is_null());
72 if (!backend.get())
73 return;
74 std::move(completion_callback).Run(result);
75 }
76
77 void InvokeEntryResultCallbackIfBackendIsAlive(
78 const base::WeakPtr<SimpleBackendImpl>& backend,
79 EntryResultCallback completion_callback,
80 EntryResult result) {
81 DCHECK(!completion_callback.is_null());
82 if (!backend.get())
83 return;
84 std::move(completion_callback).Run(std::move(result));
85 }
86
87 // If |sync_possible| is false and a callback is available, posts rv to it and
88 // returns net::ERR_IO_PENDING; otherwise just passes through rv.
89 int PostToCallbackIfNeeded(bool sync_possible,
90 net::CompletionOnceCallback callback,
91 int rv) {
92 if (!sync_possible && !callback.is_null()) {
93 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
94 FROM_HERE, base::BindOnce(std::move(callback), rv));
95 return net::ERR_IO_PENDING;
96 } else {
97 return rv;
98 }
99 }
100
101 } // namespace
102
103 using base::OnceClosure;
104 using base::FilePath;
105 using base::Time;
106 using base::TaskRunner;
107
108 // A helper class to ensure that RunNextOperationIfNeeded() is called when
109 // exiting the current stack frame.
110 class SimpleEntryImpl::ScopedOperationRunner {
111 public:
112   explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
113 }
114
115   ~ScopedOperationRunner() {
116 entry_->RunNextOperationIfNeeded();
117 }
118
119 private:
120 const raw_ptr<SimpleEntryImpl> entry_;
121 };
122
123 SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() = default;
124
125 SimpleEntryImpl::SimpleEntryImpl(
126 net::CacheType cache_type,
127 const FilePath& path,
128 scoped_refptr<BackendCleanupTracker> cleanup_tracker,
129 const uint64_t entry_hash,
130 OperationsMode operations_mode,
131 SimpleBackendImpl* backend,
132 SimpleFileTracker* file_tracker,
133 scoped_refptr<BackendFileOperationsFactory> file_operations_factory,
134 net::NetLog* net_log,
135 uint32_t entry_priority)
136 : cleanup_tracker_(std::move(cleanup_tracker)),
137 backend_(backend->AsWeakPtr()),
138 file_tracker_(file_tracker),
139 file_operations_factory_(std::move(file_operations_factory)),
140 cache_type_(cache_type),
141 path_(path),
142 entry_hash_(entry_hash),
143 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
144 last_used_(Time::Now()),
145 last_modified_(last_used_),
146 prioritized_task_runner_(backend_->prioritized_task_runner()),
147 net_log_(
148 net::NetLogWithSource::Make(net_log,
149 net::NetLogSourceType::DISK_CACHE_ENTRY)),
150 stream_0_data_(base::MakeRefCounted<net::GrowableIOBuffer>()),
151 entry_priority_(entry_priority) {
152 static_assert(std::extent<decltype(data_size_)>() ==
153 std::extent<decltype(crc32s_end_offset_)>(),
154 "arrays should be the same size");
155 static_assert(
156 std::extent<decltype(data_size_)>() == std::extent<decltype(crc32s_)>(),
157 "arrays should be the same size");
158 static_assert(std::extent<decltype(data_size_)>() ==
159 std::extent<decltype(have_written_)>(),
160 "arrays should be the same size");
161 ResetEntry();
162 NetLogSimpleEntryConstruction(net_log_,
163 net::NetLogEventType::SIMPLE_CACHE_ENTRY,
164 net::NetLogEventPhase::BEGIN, this);
165 }
166
167 void SimpleEntryImpl::SetActiveEntryProxy(
168 std::unique_ptr<ActiveEntryProxy> active_entry_proxy) {
169 DCHECK(!active_entry_proxy_);
170 active_entry_proxy_ = std::move(active_entry_proxy);
171 }
172
173 EntryResult SimpleEntryImpl::OpenEntry(EntryResultCallback callback) {
174 DCHECK(backend_.get());
175
176 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_CALL);
177
178 OpenEntryIndexEnum index_state =
179 ComputeIndexState(backend_.get(), entry_hash_);
180 RecordOpenEntryIndexState(cache_type_, index_state);
181
182 // If entry is not known to the index, initiate fast failover to the network.
183 if (index_state == INDEX_MISS) {
184 net_log_.AddEventWithNetErrorCode(
185 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END, net::ERR_FAILED);
186 return EntryResult::MakeError(net::ERR_FAILED);
187 }
188
189 pending_operations_.push(SimpleEntryOperation::OpenOperation(
190 this, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, std::move(callback)));
191 RunNextOperationIfNeeded();
192 return EntryResult::MakeError(net::ERR_IO_PENDING);
193 }
194
195 EntryResult SimpleEntryImpl::CreateEntry(EntryResultCallback callback) {
196 DCHECK(backend_.get());
197 DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));
198
199 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_CALL);
200
201 EntryResult result = EntryResult::MakeError(net::ERR_IO_PENDING);
202 if (use_optimistic_operations_ &&
203 state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
204 net_log_.AddEvent(
205 net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
206
207 ReturnEntryToCaller();
208 result = EntryResult::MakeCreated(this);
209 pending_operations_.push(SimpleEntryOperation::CreateOperation(
210 this, SimpleEntryOperation::ENTRY_ALREADY_RETURNED,
211 EntryResultCallback()));
212
213     // If we are optimistically returning before a preceding doom, we need to
214 // wait for that IO, about which we will be notified externally.
215 if (optimistic_create_pending_doom_state_ != CREATE_NORMAL) {
216 DCHECK_EQ(CREATE_OPTIMISTIC_PENDING_DOOM,
217 optimistic_create_pending_doom_state_);
218 state_ = STATE_IO_PENDING;
219 }
220 } else {
221 pending_operations_.push(SimpleEntryOperation::CreateOperation(
222 this, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, std::move(callback)));
223 }
224
225 // We insert the entry in the index before creating the entry files in the
226   // SimpleSynchronousEntry, because this way the worst-case scenario is that
227   // we have the entry in the index but don't yet have the created files; that
228 // way we never leak files. CreationOperationComplete will remove the entry
229 // from the index if the creation fails.
230 backend_->index()->Insert(entry_hash_);
231
232 RunNextOperationIfNeeded();
233 return result;
234 }
235
236 EntryResult SimpleEntryImpl::OpenOrCreateEntry(EntryResultCallback callback) {
237 DCHECK(backend_.get());
238 DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));
239
240 net_log_.AddEvent(
241 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_CALL);
242
243 OpenEntryIndexEnum index_state =
244 ComputeIndexState(backend_.get(), entry_hash_);
245 RecordOpenEntryIndexState(cache_type_, index_state);
246
247 EntryResult result = EntryResult::MakeError(net::ERR_IO_PENDING);
248 if (index_state == INDEX_MISS && use_optimistic_operations_ &&
249 state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
250 net_log_.AddEvent(
251 net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
252
253 ReturnEntryToCaller();
254 result = EntryResult::MakeCreated(this);
255 pending_operations_.push(SimpleEntryOperation::OpenOrCreateOperation(
256 this, index_state, SimpleEntryOperation::ENTRY_ALREADY_RETURNED,
257 EntryResultCallback()));
258
259 // The post-doom stuff should go through CreateEntry, not here.
260 DCHECK_EQ(CREATE_NORMAL, optimistic_create_pending_doom_state_);
261 } else {
262 pending_operations_.push(SimpleEntryOperation::OpenOrCreateOperation(
263 this, index_state, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK,
264 std::move(callback)));
265 }
266
267 // We insert the entry in the index before creating the entry files in the
268   // SimpleSynchronousEntry, because this way the worst-case scenario is that
269   // we have the entry in the index but don't yet have the created files; that
270 // way we never leak files. CreationOperationComplete will remove the entry
271 // from the index if the creation fails.
272 backend_->index()->Insert(entry_hash_);
273
274 RunNextOperationIfNeeded();
275 return result;
276 }
277
278 net::Error SimpleEntryImpl::DoomEntry(net::CompletionOnceCallback callback) {
279 if (doom_state_ != DOOM_NONE)
280 return net::OK;
281 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_CALL);
282 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_BEGIN);
283
284 MarkAsDoomed(DOOM_QUEUED);
285 if (backend_.get()) {
286 if (optimistic_create_pending_doom_state_ == CREATE_NORMAL) {
287 post_doom_waiting_ = backend_->OnDoomStart(entry_hash_);
288 } else {
289 DCHECK_EQ(STATE_IO_PENDING, state_);
290 DCHECK_EQ(CREATE_OPTIMISTIC_PENDING_DOOM,
291 optimistic_create_pending_doom_state_);
292 // If we are in this state, we went ahead with making the entry even
293 // though the backend was already keeping track of a doom, so it can't
294 // keep track of ours. So we delay notifying it until
295 // NotifyDoomBeforeCreateComplete is called. Since this path is invoked
296 // only when the queue of post-doom callbacks was previously empty, while
297 // the CompletionOnceCallback for the op is posted,
298 // NotifyDoomBeforeCreateComplete() will be the first thing running after
299 // the previous doom completes, so at that point we can immediately grab
300 // a spot in entries_pending_doom_.
301 optimistic_create_pending_doom_state_ =
302 CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM;
303 }
304 }
305 pending_operations_.push(
306 SimpleEntryOperation::DoomOperation(this, std::move(callback)));
307 RunNextOperationIfNeeded();
308 return net::ERR_IO_PENDING;
309 }
310
311 void SimpleEntryImpl::SetCreatePendingDoom() {
312 DCHECK_EQ(CREATE_NORMAL, optimistic_create_pending_doom_state_);
313 optimistic_create_pending_doom_state_ = CREATE_OPTIMISTIC_PENDING_DOOM;
314 }
315
316 void SimpleEntryImpl::NotifyDoomBeforeCreateComplete() {
317 DCHECK_EQ(STATE_IO_PENDING, state_);
318 DCHECK_NE(CREATE_NORMAL, optimistic_create_pending_doom_state_);
319 if (backend_.get() && optimistic_create_pending_doom_state_ ==
320 CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM)
321 post_doom_waiting_ = backend_->OnDoomStart(entry_hash_);
322
323 state_ = STATE_UNINITIALIZED;
324 optimistic_create_pending_doom_state_ = CREATE_NORMAL;
325 RunNextOperationIfNeeded();
326 }
327
328 void SimpleEntryImpl::SetKey(const std::string& key) {
329 key_ = key;
330 net_log_.AddEventWithStringParams(
331 net::NetLogEventType::SIMPLE_CACHE_ENTRY_SET_KEY, "key", key);
332 }
333
334 void SimpleEntryImpl::Doom() {
335 DoomEntry(CompletionOnceCallback());
336 }
337
338 void SimpleEntryImpl::Close() {
339 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
340 CHECK_LT(0, open_count_);
341
342 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_CALL);
343
344 if (--open_count_ > 0) {
345 DCHECK(!HasOneRef());
346 Release(); // Balanced in ReturnEntryToCaller().
347 return;
348 }
349
350 pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
351 DCHECK(!HasOneRef());
352 Release(); // Balanced in ReturnEntryToCaller().
353 RunNextOperationIfNeeded();
354 }
355
356 std::string SimpleEntryImpl::GetKey() const {
357 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
358 return key_;
359 }
360
361 Time SimpleEntryImpl::GetLastUsed() const {
362 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
363 DCHECK(cache_type_ != net::APP_CACHE);
364 return last_used_;
365 }
366
367 Time SimpleEntryImpl::GetLastModified() const {
368 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
369 return last_modified_;
370 }
371
372 int32_t SimpleEntryImpl::GetDataSize(int stream_index) const {
373 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
374 DCHECK_LE(0, data_size_[stream_index]);
375 return data_size_[stream_index];
376 }
377
378 int SimpleEntryImpl::ReadData(int stream_index,
379 int offset,
380 net::IOBuffer* buf,
381 int buf_len,
382 CompletionOnceCallback callback) {
383 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
384
385 if (net_log_.IsCapturing()) {
386 NetLogReadWriteData(
387 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_CALL,
388 net::NetLogEventPhase::NONE, stream_index, offset, buf_len, false);
389 }
390
391 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
392 buf_len < 0) {
393 if (net_log_.IsCapturing()) {
394 NetLogReadWriteComplete(
395 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
396 net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
397 }
398
399 return net::ERR_INVALID_ARGUMENT;
400 }
401
402 // If this is the only operation, bypass the queue, and also see if there is
403 // in-memory data to handle it synchronously. In principle, multiple reads can
404 // be parallelized, but past studies have shown that parallelizable ones
405 // happen <1% of the time, so it's probably not worth the effort.
406 bool alone_in_queue =
407 pending_operations_.size() == 0 && state_ == STATE_READY;
408
409 if (alone_in_queue) {
410 return ReadDataInternal(/*sync_possible = */ true, stream_index, offset,
411 buf, buf_len, std::move(callback));
412 }
413
414 pending_operations_.push(SimpleEntryOperation::ReadOperation(
415 this, stream_index, offset, buf_len, buf, std::move(callback)));
416 RunNextOperationIfNeeded();
417 return net::ERR_IO_PENDING;
418 }
419
420 int SimpleEntryImpl::WriteData(int stream_index,
421 int offset,
422 net::IOBuffer* buf,
423 int buf_len,
424 CompletionOnceCallback callback,
425 bool truncate) {
426 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
427
428 if (net_log_.IsCapturing()) {
429 NetLogReadWriteData(
430 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_CALL,
431 net::NetLogEventPhase::NONE, stream_index, offset, buf_len, truncate);
432 }
433
434 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
435 offset < 0 || buf_len < 0) {
436 if (net_log_.IsCapturing()) {
437 NetLogReadWriteComplete(
438 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
439 net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
440 }
441 return net::ERR_INVALID_ARGUMENT;
442 }
443 int end_offset;
444 if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
445 (backend_.get() && end_offset > backend_->MaxFileSize())) {
446 if (net_log_.IsCapturing()) {
447 NetLogReadWriteComplete(
448 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
449 net::NetLogEventPhase::NONE, net::ERR_FAILED);
450 }
451 return net::ERR_FAILED;
452 }
453 ScopedOperationRunner operation_runner(this);
454
455   // Stream 0 data is kept in memory, so it can be written immediately if there
456   // are no IO operations pending.
457 if (stream_index == 0 && state_ == STATE_READY &&
458 pending_operations_.size() == 0)
459 return SetStream0Data(buf, offset, buf_len, truncate);
460
461   // We can only do an optimistic Write if there are no pending operations, so
462   // that we are sure that the next call to RunNextOperationIfNeeded will
463   // actually run the write operation that sets the stream size. It also
464   // guards against previous possibly-conflicting writes that could be stacked
465   // in |pending_operations_|. We could optimize this for when we have
466   // only read operations enqueued, but past studies have shown that such
467   // parallelizable cases are very rare.
468 const bool optimistic =
469 (use_optimistic_operations_ && state_ == STATE_READY &&
470 pending_operations_.size() == 0);
471 CompletionOnceCallback op_callback;
472 scoped_refptr<net::IOBuffer> op_buf;
473 int ret_value = net::ERR_FAILED;
474 if (!optimistic) {
475 op_buf = buf;
476 op_callback = std::move(callback);
477 ret_value = net::ERR_IO_PENDING;
478 } else {
479 // TODO(morlovich,pasko): For performance, don't use a copy of an IOBuffer
480 // here to avoid paying the price of the RefCountedThreadSafe atomic
481 // operations.
482 if (buf) {
483 op_buf = base::MakeRefCounted<IOBuffer>(buf_len);
484 memcpy(op_buf->data(), buf->data(), buf_len);
485 }
486 op_callback = CompletionOnceCallback();
487 ret_value = buf_len;
488 if (net_log_.IsCapturing()) {
489 NetLogReadWriteComplete(
490 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
491 net::NetLogEventPhase::NONE, buf_len);
492 }
493 }
494
495 pending_operations_.push(SimpleEntryOperation::WriteOperation(
496 this, stream_index, offset, buf_len, op_buf.get(), truncate, optimistic,
497 std::move(op_callback)));
498 return ret_value;
499 }
500
501 int SimpleEntryImpl::ReadSparseData(int64_t offset,
502 net::IOBuffer* buf,
503 int buf_len,
504 CompletionOnceCallback callback) {
505 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
506
507 if (net_log_.IsCapturing()) {
508 NetLogSparseOperation(
509 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_CALL,
510 net::NetLogEventPhase::NONE, offset, buf_len);
511 }
512
513 if (offset < 0 || buf_len < 0) {
514 if (net_log_.IsCapturing()) {
515 NetLogReadWriteComplete(
516 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
517 net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
518 }
519 return net::ERR_INVALID_ARGUMENT;
520 }
521
522 // Truncate |buf_len| to make sure that |offset + buf_len| does not overflow.
523 // This is OK since one can't write that far anyway.
524 // The result of std::min is guaranteed to fit into int since |buf_len| did.
525 buf_len = std::min(static_cast<int64_t>(buf_len),
526 std::numeric_limits<int64_t>::max() - offset);
527
528 ScopedOperationRunner operation_runner(this);
529 pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
530 this, offset, buf_len, buf, std::move(callback)));
531 return net::ERR_IO_PENDING;
532 }
533
534 int SimpleEntryImpl::WriteSparseData(int64_t offset,
535 net::IOBuffer* buf,
536 int buf_len,
537 CompletionOnceCallback callback) {
538 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
539
540 if (net_log_.IsCapturing()) {
541 NetLogSparseOperation(
542 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_CALL,
543 net::NetLogEventPhase::NONE, offset, buf_len);
544 }
545
546 if (offset < 0 || buf_len < 0 || !base::CheckAdd(offset, buf_len).IsValid()) {
547 if (net_log_.IsCapturing()) {
548 NetLogReadWriteComplete(
549 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
550 net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
551 }
552 return net::ERR_INVALID_ARGUMENT;
553 }
554
555 ScopedOperationRunner operation_runner(this);
556 pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
557 this, offset, buf_len, buf, std::move(callback)));
558 return net::ERR_IO_PENDING;
559 }
560
561 RangeResult SimpleEntryImpl::GetAvailableRange(int64_t offset,
562 int len,
563 RangeResultCallback callback) {
564 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
565 if (offset < 0 || len < 0)
566 return RangeResult(net::ERR_INVALID_ARGUMENT);
567
568 // Truncate |len| to make sure that |offset + len| does not overflow.
569 // This is OK since one can't write that far anyway.
570 // The result of std::min is guaranteed to fit into int since |len| did.
571 len = std::min(static_cast<int64_t>(len),
572 std::numeric_limits<int64_t>::max() - offset);
573
574 ScopedOperationRunner operation_runner(this);
575 pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
576 this, offset, len, std::move(callback)));
577 return RangeResult(net::ERR_IO_PENDING);
578 }
579
580 bool SimpleEntryImpl::CouldBeSparse() const {
581 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
582 // TODO(morlovich): Actually check.
583 return true;
584 }
585
586 void SimpleEntryImpl::CancelSparseIO() {
587 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
588 // The Simple Cache does not return distinct objects for the same non-doomed
589 // entry, so there's no need to coordinate which object is performing sparse
590 // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
591 }
592
593 net::Error SimpleEntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
594 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
595   // The Simple Cache does not return distinct objects for the same non-doomed
596 // entry, so there's no need to coordinate which object is performing sparse
597 // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
598 return net::OK;
599 }
600
601 void SimpleEntryImpl::SetLastUsedTimeForTest(base::Time time) {
602 last_used_ = time;
603 backend_->index()->SetLastUsedTimeForTest(entry_hash_, time);
604 }
605
606 void SimpleEntryImpl::SetPriority(uint32_t entry_priority) {
607 entry_priority_ = entry_priority;
608 }
609
610 SimpleEntryImpl::~SimpleEntryImpl() {
611 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
612 DCHECK_EQ(0U, pending_operations_.size());
613
614 // This used to DCHECK on `state_`, but it turns out that destruction
615   // happening on thread shutdown, when closures holding `this` get deleted,
616   // can happen in circumstances not possible during normal use, such as when
617   // I/O for a Close operation is keeping the entry alive in STATE_IO_PENDING, or
618   // an entry that's STATE_READY has callbacks pending to hand it over to the
619   // user right as the thread is shut down (this would also have a non-null
620 // `synchronous_entry_`).
621 net_log_.EndEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY);
622 }
623
624 void SimpleEntryImpl::PostClientCallback(net::CompletionOnceCallback callback,
625 int result) {
626 if (callback.is_null())
627 return;
628 // Note that the callback is posted rather than directly invoked to avoid
629 // reentrancy issues.
630 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
631 FROM_HERE, base::BindOnce(&InvokeCallbackIfBackendIsAlive, backend_,
632 std::move(callback), result));
633 }
634
635 void SimpleEntryImpl::PostClientCallback(EntryResultCallback callback,
636 EntryResult result) {
637 if (callback.is_null())
638 return;
639 // Note that the callback is posted rather than directly invoked to avoid
640 // reentrancy issues.
641 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
642 FROM_HERE,
643 base::BindOnce(&InvokeEntryResultCallbackIfBackendIsAlive, backend_,
644 std::move(callback), std::move(result)));
645 }
646
647 void SimpleEntryImpl::ResetEntry() {
648 // If we're doomed, we can't really do anything else with the entry, since
649 // we no longer own the name and are disconnected from the active entry table.
650   // We preserve doom_state_ across this entry for this same reason.
651 state_ = doom_state_ == DOOM_COMPLETED ? STATE_FAILURE : STATE_UNINITIALIZED;
652 std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
653 std::memset(crc32s_, 0, sizeof(crc32s_));
654 std::memset(have_written_, 0, sizeof(have_written_));
655 std::memset(data_size_, 0, sizeof(data_size_));
656 }
657
658 void SimpleEntryImpl::ReturnEntryToCaller() {
659 DCHECK(backend_);
660 ++open_count_;
661 AddRef(); // Balanced in Close()
662 }
663
664 void SimpleEntryImpl::ReturnEntryToCallerAsync(bool is_open,
665 EntryResultCallback callback) {
666 DCHECK(!callback.is_null());
667
668 // |open_count_| must be incremented immediately, so that a Close on an alias
669 // doesn't try to wrap things up.
670 ++open_count_;
671
672 // Note that the callback is posted rather than directly invoked to avoid
673 // reentrancy issues.
674 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
675 FROM_HERE,
676 base::BindOnce(&SimpleEntryImpl::FinishReturnEntryToCallerAsync, this,
677 is_open, std::move(callback)));
678 }
679
680 void SimpleEntryImpl::FinishReturnEntryToCallerAsync(
681 bool is_open,
682 EntryResultCallback callback) {
683 AddRef(); // Balanced in Close()
684 if (!backend_.get()) {
685 // With backend dead, Open/Create operations are responsible for cleaning up
686 // the entry --- the ownership is never transferred to the caller, and their
687 // callback isn't invoked.
688 Close();
689 return;
690 }
691
692 std::move(callback).Run(is_open ? EntryResult::MakeOpened(this)
693 : EntryResult::MakeCreated(this));
694 }
695
696 void SimpleEntryImpl::MarkAsDoomed(DoomState new_state) {
697 DCHECK_NE(DOOM_NONE, new_state);
698 doom_state_ = new_state;
699 if (!backend_.get())
700 return;
701 backend_->index()->Remove(entry_hash_);
702 active_entry_proxy_.reset();
703 }
704
705 void SimpleEntryImpl::RunNextOperationIfNeeded() {
706 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
707 if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
708 SimpleEntryOperation operation = std::move(pending_operations_.front());
709 pending_operations_.pop();
710 switch (operation.type()) {
711 case SimpleEntryOperation::TYPE_OPEN:
712 OpenEntryInternal(operation.entry_result_state(),
713 operation.ReleaseEntryResultCallback());
714 break;
715 case SimpleEntryOperation::TYPE_CREATE:
716 CreateEntryInternal(operation.entry_result_state(),
717 operation.ReleaseEntryResultCallback());
718 break;
719 case SimpleEntryOperation::TYPE_OPEN_OR_CREATE:
720 OpenOrCreateEntryInternal(operation.index_state(),
721 operation.entry_result_state(),
722 operation.ReleaseEntryResultCallback());
723 break;
724 case SimpleEntryOperation::TYPE_CLOSE:
725 CloseInternal();
726 break;
727 case SimpleEntryOperation::TYPE_READ:
728 ReadDataInternal(/* sync_possible= */ false, operation.index(),
729 operation.offset(), operation.buf(),
730 operation.length(), operation.ReleaseCallback());
731 break;
732 case SimpleEntryOperation::TYPE_WRITE:
733 WriteDataInternal(operation.index(), operation.offset(),
734 operation.buf(), operation.length(),
735 operation.ReleaseCallback(), operation.truncate());
736 break;
737 case SimpleEntryOperation::TYPE_READ_SPARSE:
738 ReadSparseDataInternal(operation.sparse_offset(), operation.buf(),
739 operation.length(), operation.ReleaseCallback());
740 break;
741 case SimpleEntryOperation::TYPE_WRITE_SPARSE:
742 WriteSparseDataInternal(operation.sparse_offset(), operation.buf(),
743 operation.length(),
744 operation.ReleaseCallback());
745 break;
746 case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
747 GetAvailableRangeInternal(operation.sparse_offset(), operation.length(),
748 operation.ReleaseRangeResultCalback());
749 break;
750 case SimpleEntryOperation::TYPE_DOOM:
751 DoomEntryInternal(operation.ReleaseCallback());
752 break;
753 default:
754 NOTREACHED();
755 }
756 // |this| may have been deleted.
757 }
758 }
759
760 void SimpleEntryImpl::OpenEntryInternal(
761 SimpleEntryOperation::EntryResultState result_state,
762 EntryResultCallback callback) {
763 ScopedOperationRunner operation_runner(this);
764
765 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_BEGIN);
766
767 // No optimistic sync return possible on open.
768 DCHECK_EQ(SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, result_state);
769
770 if (state_ == STATE_READY) {
771 ReturnEntryToCallerAsync(/* is_open = */ true, std::move(callback));
772 NetLogSimpleEntryCreation(net_log_,
773 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END,
774 net::NetLogEventPhase::NONE, this, net::OK);
775 return;
776 }
777 if (state_ == STATE_FAILURE) {
778 PostClientCallback(std::move(callback),
779 EntryResult::MakeError(net::ERR_FAILED));
780 NetLogSimpleEntryCreation(
781 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END,
782 net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
783 return;
784 }
785
786 DCHECK_EQ(STATE_UNINITIALIZED, state_);
787 DCHECK(!synchronous_entry_);
788 state_ = STATE_IO_PENDING;
789 const base::TimeTicks start_time = base::TimeTicks::Now();
790 auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
791 last_used_, last_modified_, data_size_, sparse_data_size_));
792
793 int32_t trailer_prefetch_size = -1;
794 base::Time last_used_time;
795 if (SimpleBackendImpl* backend = backend_.get()) {
796 if (cache_type_ == net::APP_CACHE) {
797 trailer_prefetch_size =
798 backend->index()->GetTrailerPrefetchSize(entry_hash_);
799 } else {
800 last_used_time = backend->index()->GetLastUsedTime(entry_hash_);
801 }
802 }
803
804 base::OnceClosure task = base::BindOnce(
805 &SimpleSynchronousEntry::OpenEntry, cache_type_, path_, key_, entry_hash_,
806 file_tracker_, file_operations_factory_->CreateUnbound(),
807 trailer_prefetch_size, results.get());
808
809 base::OnceClosure reply = base::BindOnce(
810 &SimpleEntryImpl::CreationOperationComplete, this, result_state,
811 std::move(callback), start_time, last_used_time, std::move(results),
812 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END);
813
814 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
815 std::move(reply), entry_priority_);
816 }
817
818 void SimpleEntryImpl::CreateEntryInternal(
819 SimpleEntryOperation::EntryResultState result_state,
820 EntryResultCallback callback) {
821 ScopedOperationRunner operation_runner(this);
822
823 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_BEGIN);
824
825 if (state_ != STATE_UNINITIALIZED) {
826 // There is already an active normal entry.
827 NetLogSimpleEntryCreation(
828 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_END,
829 net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
830 // If we have optimistically returned an entry, we would be the first entry
831 // in queue with state_ == STATE_UNINITIALIZED.
832 DCHECK_EQ(SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, result_state);
833 PostClientCallback(std::move(callback),
834 EntryResult::MakeError(net::ERR_FAILED));
835 return;
836 }
837 DCHECK_EQ(STATE_UNINITIALIZED, state_);
838 DCHECK(!synchronous_entry_);
839
840 state_ = STATE_IO_PENDING;
841
842 // Since we don't know the correct values for |last_used_| and
843 // |last_modified_| yet, we make this approximation.
844 last_used_ = last_modified_ = base::Time::Now();
845
846 const base::TimeTicks start_time = base::TimeTicks::Now();
847 auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
848 last_used_, last_modified_, data_size_, sparse_data_size_));
849
850 OnceClosure task =
851 base::BindOnce(&SimpleSynchronousEntry::CreateEntry, cache_type_, path_,
852 key_, entry_hash_, file_tracker_,
853 file_operations_factory_->CreateUnbound(), results.get());
854 OnceClosure reply = base::BindOnce(
855 &SimpleEntryImpl::CreationOperationComplete, this, result_state,
856 std::move(callback), start_time, base::Time(), std::move(results),
857 net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_END);
858 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
859 std::move(reply), entry_priority_);
860 }
861
862 void SimpleEntryImpl::OpenOrCreateEntryInternal(
863 OpenEntryIndexEnum index_state,
864 SimpleEntryOperation::EntryResultState result_state,
865 EntryResultCallback callback) {
866 ScopedOperationRunner operation_runner(this);
867
868 net_log_.AddEvent(
869 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_BEGIN);
870
871 // result_state may be ENTRY_ALREADY_RETURNED only if an optimistic create is
872 // being performed, which must be in STATE_UNINITIALIZED.
873 bool optimistic_create =
874 (result_state == SimpleEntryOperation::ENTRY_ALREADY_RETURNED);
875 DCHECK(!optimistic_create || state_ == STATE_UNINITIALIZED);
876
877 if (state_ == STATE_READY) {
878 ReturnEntryToCallerAsync(/* is_open = */ true, std::move(callback));
879 NetLogSimpleEntryCreation(
880 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END,
881 net::NetLogEventPhase::NONE, this, net::OK);
882 return;
883 }
884 if (state_ == STATE_FAILURE) {
885 PostClientCallback(std::move(callback),
886 EntryResult::MakeError(net::ERR_FAILED));
887 NetLogSimpleEntryCreation(
888 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END,
889 net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
890 return;
891 }
892
893 DCHECK_EQ(STATE_UNINITIALIZED, state_);
894 DCHECK(!synchronous_entry_);
895 state_ = STATE_IO_PENDING;
896 const base::TimeTicks start_time = base::TimeTicks::Now();
897 auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
898 last_used_, last_modified_, data_size_, sparse_data_size_));
899
900 int32_t trailer_prefetch_size = -1;
901 base::Time last_used_time;
902 if (SimpleBackendImpl* backend = backend_.get()) {
903 if (cache_type_ == net::APP_CACHE) {
904 trailer_prefetch_size =
905 backend->index()->GetTrailerPrefetchSize(entry_hash_);
906 } else {
907 last_used_time = backend->index()->GetLastUsedTime(entry_hash_);
908 }
909 }
910
911 base::OnceClosure task =
912 base::BindOnce(&SimpleSynchronousEntry::OpenOrCreateEntry, cache_type_,
913 path_, key_, entry_hash_, index_state, optimistic_create,
914 file_tracker_, file_operations_factory_->CreateUnbound(),
915 trailer_prefetch_size, results.get());
916
917 base::OnceClosure reply = base::BindOnce(
918 &SimpleEntryImpl::CreationOperationComplete, this, result_state,
919 std::move(callback), start_time, last_used_time, std::move(results),
920 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END);
921
922 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
923 std::move(reply), entry_priority_);
924 }
925
926 void SimpleEntryImpl::CloseInternal() {
927 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
928
929 if (open_count_ != 0) {
930 // Entry got resurrected in between Close and CloseInternal, nothing to do
931 // for now.
932 return;
933 }
934
935 typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
936 auto crc32s_to_write = std::make_unique<std::vector<CRCRecord>>();
937
938 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);
939
940 if (state_ == STATE_READY) {
941 DCHECK(synchronous_entry_);
942 state_ = STATE_IO_PENDING;
943 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
944 if (have_written_[i]) {
945 if (GetDataSize(i) == crc32s_end_offset_[i]) {
946 int32_t crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
947 crc32s_to_write->push_back(CRCRecord(i, true, crc));
948 } else {
949 crc32s_to_write->push_back(CRCRecord(i, false, 0));
950 }
951 }
952 }
953 } else {
954 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
955 }
956
957 auto results = std::make_unique<SimpleEntryCloseResults>();
958 if (synchronous_entry_) {
959 OnceClosure task = base::BindOnce(
960 &SimpleSynchronousEntry::Close, base::Unretained(synchronous_entry_),
961 SimpleEntryStat(last_used_, last_modified_, data_size_,
962 sparse_data_size_),
963 std::move(crc32s_to_write), base::RetainedRef(stream_0_data_),
964 results.get());
965 OnceClosure reply = base::BindOnce(&SimpleEntryImpl::CloseOperationComplete,
966 this, std::move(results));
967 synchronous_entry_ = nullptr;
968 prioritized_task_runner_->PostTaskAndReply(
969 FROM_HERE, std::move(task), std::move(reply), entry_priority_);
970 } else {
971 CloseOperationComplete(std::move(results));
972 }
973 }
974
975 int SimpleEntryImpl::ReadDataInternal(bool sync_possible,
976 int stream_index,
977 int offset,
978 net::IOBuffer* buf,
979 int buf_len,
980 net::CompletionOnceCallback callback) {
981 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
982 ScopedOperationRunner operation_runner(this);
983
984 if (net_log_.IsCapturing()) {
985 NetLogReadWriteData(
986 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_BEGIN,
987 net::NetLogEventPhase::NONE, stream_index, offset, buf_len, false);
988 }
989
990 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
991 if (net_log_.IsCapturing()) {
992 NetLogReadWriteComplete(net_log_,
993 net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
994 net::NetLogEventPhase::NONE, net::ERR_FAILED);
995 }
996 // Note that the API states that client-provided callbacks for entry-level
997 // (i.e. non-backend) operations (e.g. read, write) are invoked even if
998 // the backend was already destroyed.
999 return PostToCallbackIfNeeded(sync_possible, std::move(callback),
1000 net::ERR_FAILED);
1001 }
1002 DCHECK_EQ(STATE_READY, state_);
1003 if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
1004 // If there is nothing to read, we bail out before setting state_ to
1005 // STATE_IO_PENDING (so ScopedOperationRunner might start us on next op
1006 // here).
1007 return PostToCallbackIfNeeded(sync_possible, std::move(callback), 0);
1008 }
1009
1010 // Truncate read to not go past end of stream.
1011 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
1012
1013 // Since stream 0 data is kept in memory, it is read immediately.
1014 if (stream_index == 0) {
1015 int rv = ReadFromBuffer(stream_0_data_.get(), offset, buf_len, buf);
1016 return PostToCallbackIfNeeded(sync_possible, std::move(callback), rv);
1017 }
1018
1019 // Sometimes we can read in-ram prefetched stream 1 data immediately, too.
1020 if (stream_index == 1) {
1021 if (stream_1_prefetch_data_) {
1022 int rv =
1023 ReadFromBuffer(stream_1_prefetch_data_.get(), offset, buf_len, buf);
1024 return PostToCallbackIfNeeded(sync_possible, std::move(callback), rv);
1025 }
1026 }
1027
1028 state_ = STATE_IO_PENDING;
1029 if (doom_state_ == DOOM_NONE && backend_.get())
1030 backend_->index()->UseIfExists(entry_hash_);
1031
1032 SimpleSynchronousEntry::ReadRequest read_req(stream_index, offset, buf_len);
1033 // Figure out if we should be computing the checksum for this read,
1034 // and whether we should be verifying it, too.
1035 if (crc32s_end_offset_[stream_index] == offset) {
1036 read_req.request_update_crc = true;
1037 read_req.previous_crc32 =
1038 offset == 0 ? crc32(0, Z_NULL, 0) : crc32s_[stream_index];
1039
1040 // We can't verify the checksum if we already overwrote part of the file.
1041 // (It may still make sense to compute it if the overwritten area and the
1042     // about-to-read-in area are adjacent).
1043 read_req.request_verify_crc = !have_written_[stream_index];
1044 }
1045
1046 auto result = std::make_unique<SimpleSynchronousEntry::ReadResult>();
1047 auto entry_stat = std::make_unique<SimpleEntryStat>(
1048 last_used_, last_modified_, data_size_, sparse_data_size_);
1049 OnceClosure task = base::BindOnce(
1050 &SimpleSynchronousEntry::ReadData, base::Unretained(synchronous_entry_),
1051 read_req, entry_stat.get(), base::RetainedRef(buf), result.get());
1052 OnceClosure reply = base::BindOnce(
1053 &SimpleEntryImpl::ReadOperationComplete, this, stream_index, offset,
1054 std::move(callback), std::move(entry_stat), std::move(result));
1055 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1056 std::move(reply), entry_priority_);
1057 return net::ERR_IO_PENDING;
1058 }
1059
1060 void SimpleEntryImpl::WriteDataInternal(int stream_index,
1061 int offset,
1062 net::IOBuffer* buf,
1063 int buf_len,
1064 net::CompletionOnceCallback callback,
1065 bool truncate) {
1066 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1067 ScopedOperationRunner operation_runner(this);
1068
1069 if (net_log_.IsCapturing()) {
1070 NetLogReadWriteData(
1071 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
1072 net::NetLogEventPhase::NONE, stream_index, offset, buf_len, truncate);
1073 }
1074
1075 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
1076 if (net_log_.IsCapturing()) {
1077 NetLogReadWriteComplete(
1078 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
1079 net::NetLogEventPhase::NONE, net::ERR_FAILED);
1080 }
1081 if (!callback.is_null()) {
1082 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1083 FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
1084 }
1085 // |this| may be destroyed after return here.
1086 return;
1087 }
1088
1089 DCHECK_EQ(STATE_READY, state_);
1090
1091   // Since stream 0 data is kept in memory, it will be written immediately.
1092 if (stream_index == 0) {
1093 int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
1094 if (!callback.is_null()) {
1095 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1096 FROM_HERE, base::BindOnce(std::move(callback), ret_value));
1097 }
1098 return;
1099 }
1100
1101 // Ignore zero-length writes that do not change the file size.
1102 if (buf_len == 0) {
1103 int32_t data_size = data_size_[stream_index];
1104 if (truncate ? (offset == data_size) : (offset <= data_size)) {
1105 if (!callback.is_null()) {
1106 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1107 FROM_HERE, base::BindOnce(std::move(callback), 0));
1108 }
1109 return;
1110 }
1111 }
1112 state_ = STATE_IO_PENDING;
1113 if (doom_state_ == DOOM_NONE && backend_.get())
1114 backend_->index()->UseIfExists(entry_hash_);
1115
1116 // Any stream 1 write invalidates the prefetched data.
1117 if (stream_index == 1)
1118 stream_1_prefetch_data_ = nullptr;
1119
1120 bool request_update_crc = false;
1121 uint32_t initial_crc = 0;
1122
1123 if (offset < crc32s_end_offset_[stream_index]) {
1124 // If a range for which the crc32 was already computed is rewritten, the
1125     // computation of the crc32 needs to start from 0 again.
1126 crc32s_end_offset_[stream_index] = 0;
1127 }
1128
1129 if (crc32s_end_offset_[stream_index] == offset) {
1130 request_update_crc = true;
1131 initial_crc = (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
1132 }
1133
1134 // |entry_stat| needs to be initialized before modifying |data_size_|.
1135 auto entry_stat = std::make_unique<SimpleEntryStat>(
1136 last_used_, last_modified_, data_size_, sparse_data_size_);
1137 if (truncate) {
1138 data_size_[stream_index] = offset + buf_len;
1139 } else {
1140 data_size_[stream_index] = std::max(offset + buf_len,
1141 GetDataSize(stream_index));
1142 }
1143
1144 auto write_result = std::make_unique<SimpleSynchronousEntry::WriteResult>();
1145
1146 // Since we don't know the correct values for |last_used_| and
1147 // |last_modified_| yet, we make this approximation.
1148 last_used_ = last_modified_ = base::Time::Now();
1149
1150 have_written_[stream_index] = true;
1151   // Writing on stream 1 affects the placement of stream 0 in the file, so the EOF
1152 // record will have to be rewritten.
1153 if (stream_index == 1)
1154 have_written_[0] = true;
1155
1156 // Retain a reference to |buf| in |reply| instead of |task|, so that we can
1157 // reduce cross thread malloc/free pairs. The cross thread malloc/free pair
1158 // increases the apparent memory usage due to the thread cached free list.
1159 // TODO(morlovich): Remove the doom_state_ argument to WriteData, since with
1160 // renaming rather than delete, creating a new stream 2 of doomed entry will
1161 // just work.
1162 OnceClosure task = base::BindOnce(
1163 &SimpleSynchronousEntry::WriteData, base::Unretained(synchronous_entry_),
1164 SimpleSynchronousEntry::WriteRequest(
1165 stream_index, offset, buf_len, initial_crc, truncate,
1166 doom_state_ != DOOM_NONE, request_update_crc),
1167 base::Unretained(buf), entry_stat.get(), write_result.get());
1168 OnceClosure reply =
1169 base::BindOnce(&SimpleEntryImpl::WriteOperationComplete, this,
1170 stream_index, std::move(callback), std::move(entry_stat),
1171 std::move(write_result), base::RetainedRef(buf));
1172 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1173 std::move(reply), entry_priority_);
1174 }
1175
1176 void SimpleEntryImpl::ReadSparseDataInternal(
1177 int64_t sparse_offset,
1178 net::IOBuffer* buf,
1179 int buf_len,
1180 net::CompletionOnceCallback callback) {
1181 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1182 ScopedOperationRunner operation_runner(this);
1183
1184 if (net_log_.IsCapturing()) {
1185 NetLogSparseOperation(
1186 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_BEGIN,
1187 net::NetLogEventPhase::NONE, sparse_offset, buf_len);
1188 }
1189
1190 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
1191 if (net_log_.IsCapturing()) {
1192 NetLogReadWriteComplete(
1193 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
1194 net::NetLogEventPhase::NONE, net::ERR_FAILED);
1195 }
1196 if (!callback.is_null()) {
1197 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1198 FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
1199 }
1200 // |this| may be destroyed after return here.
1201 return;
1202 }
1203
1204 DCHECK_EQ(STATE_READY, state_);
1205 state_ = STATE_IO_PENDING;
1206
1207 auto result = std::make_unique<int>();
1208 auto last_used = std::make_unique<base::Time>();
1209 OnceClosure task = base::BindOnce(
1210 &SimpleSynchronousEntry::ReadSparseData,
1211 base::Unretained(synchronous_entry_),
1212 SimpleSynchronousEntry::SparseRequest(sparse_offset, buf_len),
1213 base::RetainedRef(buf), last_used.get(), result.get());
1214 OnceClosure reply = base::BindOnce(
1215 &SimpleEntryImpl::ReadSparseOperationComplete, this, std::move(callback),
1216 std::move(last_used), std::move(result));
1217 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1218 std::move(reply), entry_priority_);
1219 }
1220
1221 void SimpleEntryImpl::WriteSparseDataInternal(
1222 int64_t sparse_offset,
1223 net::IOBuffer* buf,
1224 int buf_len,
1225 net::CompletionOnceCallback callback) {
1226 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1227 ScopedOperationRunner operation_runner(this);
1228
1229 if (net_log_.IsCapturing()) {
1230 NetLogSparseOperation(
1231 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_BEGIN,
1232 net::NetLogEventPhase::NONE, sparse_offset, buf_len);
1233 }
1234
1235 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
1236 if (net_log_.IsCapturing()) {
1237 NetLogReadWriteComplete(
1238 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
1239 net::NetLogEventPhase::NONE, net::ERR_FAILED);
1240 }
1241 if (!callback.is_null()) {
1242 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1243 FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
1244 }
1245 // |this| may be destroyed after return here.
1246 return;
1247 }
1248
1249 DCHECK_EQ(STATE_READY, state_);
1250 state_ = STATE_IO_PENDING;
1251
1252 uint64_t max_sparse_data_size = std::numeric_limits<int64_t>::max();
1253 if (backend_.get()) {
1254 uint64_t max_cache_size = backend_->index()->max_size();
1255 max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
1256 }
1257
1258 auto entry_stat = std::make_unique<SimpleEntryStat>(
1259 last_used_, last_modified_, data_size_, sparse_data_size_);
1260
1261 last_used_ = last_modified_ = base::Time::Now();
1262
1263 auto result = std::make_unique<int>();
1264 OnceClosure task = base::BindOnce(
1265 &SimpleSynchronousEntry::WriteSparseData,
1266 base::Unretained(synchronous_entry_),
1267 SimpleSynchronousEntry::SparseRequest(sparse_offset, buf_len),
1268 base::RetainedRef(buf), max_sparse_data_size, entry_stat.get(),
1269 result.get());
1270 OnceClosure reply = base::BindOnce(
1271 &SimpleEntryImpl::WriteSparseOperationComplete, this, std::move(callback),
1272 std::move(entry_stat), std::move(result));
1273 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1274 std::move(reply), entry_priority_);
1275 }
1276
1277 void SimpleEntryImpl::GetAvailableRangeInternal(int64_t sparse_offset,
1278 int len,
1279 RangeResultCallback callback) {
1280 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1281 ScopedOperationRunner operation_runner(this);
1282
1283 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
1284 if (!callback.is_null()) {
1285 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1286 FROM_HERE,
1287 base::BindOnce(std::move(callback), RangeResult(net::ERR_FAILED)));
1288 }
1289 // |this| may be destroyed after return here.
1290 return;
1291 }
1292
1293 DCHECK_EQ(STATE_READY, state_);
1294 state_ = STATE_IO_PENDING;
1295
1296 auto result = std::make_unique<RangeResult>();
1297 OnceClosure task = base::BindOnce(
1298 &SimpleSynchronousEntry::GetAvailableRange,
1299 base::Unretained(synchronous_entry_),
1300 SimpleSynchronousEntry::SparseRequest(sparse_offset, len), result.get());
1301 OnceClosure reply =
1302 base::BindOnce(&SimpleEntryImpl::GetAvailableRangeOperationComplete, this,
1303 std::move(callback), std::move(result));
1304 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1305 std::move(reply), entry_priority_);
1306 }
1307
1308 void SimpleEntryImpl::DoomEntryInternal(net::CompletionOnceCallback callback) {
1309 if (doom_state_ == DOOM_COMPLETED) {
1310 // During the time we were sitting on a queue, some operation failed
1311 // and cleaned our files up, so we don't have to do anything.
1312 DoomOperationComplete(std::move(callback), state_, net::OK);
1313 return;
1314 }
1315
1316 if (!backend_) {
1317 // If there's no backend, we want to truncate the files rather than delete
1318 // or rename them. Either op will update the entry directory's mtime, which
1319 // will likely force a full index rebuild on the next startup; this is
1320 // clearly an undesirable cost. Instead, the lesser evil is to set the entry
1321 // files to length zero, leaving the invalid entry in the index. On the next
1322 // attempt to open the entry, it will fail asynchronously (since the magic
1323 // numbers will not be found), and the files will actually be removed.
1324     // Since there is no backend, new entries that would conflict with us also
1325     // can't be created.
1326 prioritized_task_runner_->PostTaskAndReplyWithResult(
1327 FROM_HERE,
1328 base::BindOnce(&SimpleSynchronousEntry::TruncateEntryFiles, path_,
1329 entry_hash_, file_operations_factory_->CreateUnbound()),
1330 base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
1331 std::move(callback),
1332 // Return to STATE_FAILURE after dooming, since no
1333 // operation can succeed on the truncated entry files.
1334 STATE_FAILURE),
1335 entry_priority_);
1336 state_ = STATE_IO_PENDING;
1337 return;
1338 }
1339
1340 if (synchronous_entry_) {
1341 // If there is a backing object, we have to go through its instance methods,
1342     // so that it can rename itself and keep track of the alternative name.
1343 prioritized_task_runner_->PostTaskAndReplyWithResult(
1344 FROM_HERE,
1345 base::BindOnce(&SimpleSynchronousEntry::Doom,
1346 base::Unretained(synchronous_entry_)),
1347 base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
1348 std::move(callback), state_),
1349 entry_priority_);
1350 } else {
1351 DCHECK_EQ(STATE_UNINITIALIZED, state_);
1352 // If nothing is open, we can just delete the files. We know they have the
1353 // base names, since if we ever renamed them our doom_state_ would be
1354 // DOOM_COMPLETED, and we would exit at function entry.
1355 prioritized_task_runner_->PostTaskAndReplyWithResult(
1356 FROM_HERE,
1357 base::BindOnce(&SimpleSynchronousEntry::DeleteEntryFiles, path_,
1358 cache_type_, entry_hash_,
1359 file_operations_factory_->CreateUnbound()),
1360 base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
1361 std::move(callback), state_),
1362 entry_priority_);
1363 }
1364 state_ = STATE_IO_PENDING;
1365 }
1366
1367 void SimpleEntryImpl::CreationOperationComplete(
1368 SimpleEntryOperation::EntryResultState result_state,
1369 EntryResultCallback completion_callback,
1370 const base::TimeTicks& start_time,
1371 const base::Time index_last_used_time,
1372 std::unique_ptr<SimpleEntryCreationResults> in_results,
1373 net::NetLogEventType end_event_type) {
1374 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1375 DCHECK_EQ(state_, STATE_IO_PENDING);
1376 DCHECK(in_results);
1377 ScopedOperationRunner operation_runner(this);
1378 if (in_results->result != net::OK) {
1379 if (in_results->result != net::ERR_FILE_EXISTS) {
1380 // Here we keep index up-to-date, but don't remove ourselves from active
1381 // entries since we may have queued operations, and it would be
1382 // problematic to run further Creates, Opens, or Dooms if we are not
1383 // the active entry. We can only do this because OpenEntryInternal
1384 // and CreateEntryInternal have to start from STATE_UNINITIALIZED, so
1385 // nothing else is going on which may be confused.
1386 if (backend_)
1387 backend_->index()->Remove(entry_hash_);
1388 }
1389
1390 net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
1391 PostClientCallback(std::move(completion_callback),
1392 EntryResult::MakeError(net::ERR_FAILED));
1393 ResetEntry();
1394 return;
1395 }
1396
1397 // If this is a successful creation (rather than open), mark all streams to be
1398 // saved on close.
1399 if (in_results->created) {
1400 for (bool& have_written : have_written_)
1401 have_written = true;
1402 }
1403
1404 // Make sure to keep the index up-to-date. We likely already did this when
1405 // CreateEntry was called, but it's possible we were sitting on a queue
1406 // after an op that removed us.
1407 if (backend_ && doom_state_ == DOOM_NONE)
1408 backend_->index()->Insert(entry_hash_);
1409
1410 state_ = STATE_READY;
1411 synchronous_entry_ = in_results->sync_entry;
1412
1413 // Copy over any pre-fetched data and its CRCs.
1414 for (int stream = 0; stream < 2; ++stream) {
1415 const SimpleStreamPrefetchData& prefetched =
1416 in_results->stream_prefetch_data[stream];
1417 if (prefetched.data.get()) {
1418 if (stream == 0)
1419 stream_0_data_ = prefetched.data;
1420 else
1421 stream_1_prefetch_data_ = prefetched.data;
1422
1423 // The crc was read in SimpleSynchronousEntry.
1424 crc32s_[stream] = prefetched.stream_crc32;
1425 crc32s_end_offset_[stream] = in_results->entry_stat.data_size(stream);
1426 }
1427 }
1428
1429 // If this entry was opened by hash, key_ could still be empty. If so, update
1430 // it with the key read from the synchronous entry.
1431 if (key_.empty()) {
1432 SetKey(synchronous_entry_->key());
1433 } else {
1434 // This should only be triggered when creating an entry. In the open case
1435 // the key is either copied from the arguments to open, or checked
1436 // in the synchronous entry.
1437 DCHECK_EQ(key_, synchronous_entry_->key());
1438 }
1439
1440 // Prefer index last used time to disk's, since that may be pretty inaccurate.
1441 if (!index_last_used_time.is_null())
1442 in_results->entry_stat.set_last_used(index_last_used_time);
1443
1444 UpdateDataFromEntryStat(in_results->entry_stat);
1445 if (cache_type_ == net::APP_CACHE && backend_.get() && backend_->index()) {
1446 backend_->index()->SetTrailerPrefetchSize(
1447 entry_hash_, in_results->computed_trailer_prefetch_size);
1448 }
1449 SIMPLE_CACHE_UMA(TIMES,
1450 "EntryCreationTime", cache_type_,
1451 (base::TimeTicks::Now() - start_time));
1452
1453 net_log_.AddEvent(end_event_type);
1454
1455 const bool created = in_results->created;
1456
1457 // We need to release `in_results` before going out of scope, because
1458 // `operation_runner` destruction might call a close operation, that will
1459 // ultimately release `in_results->sync_entry`, and thus leading to having a
1460 // dangling pointer here.
1461 in_results = nullptr;
1462 if (result_state == SimpleEntryOperation::ENTRY_NEEDS_CALLBACK) {
1463 ReturnEntryToCallerAsync(!created, std::move(completion_callback));
1464 }
1465 }
1466
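// Transitions out of STATE_IO_PENDING once a synchronous-entry operation has
// finished: failures move the entry to STATE_FAILURE and doom it, while
// successes return it to STATE_READY and refresh the cached sizes and
// timestamps from |entry_stat|.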
void SimpleEntryImpl::UpdateStateAfterOperationComplete(
    const SimpleEntryStat& entry_stat,
    int result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  if (result < 0) {
    state_ = STATE_FAILURE;
    MarkAsDoomed(DOOM_COMPLETED);
  } else {
    state_ = STATE_READY;
    UpdateDataFromEntryStat(entry_stat);
  }
}

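// Common tail for read/write completions: updates the entry state, posts
// |result| to the client callback on the current sequence, and starts the
// next queued operation, if any.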
void SimpleEntryImpl::EntryOperationComplete(
    net::CompletionOnceCallback completion_callback,
    const SimpleEntryStat& entry_stat,
    int result) {
  UpdateStateAfterOperationComplete(entry_stat, result);
  if (!completion_callback.is_null()) {
    base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
        FROM_HERE, base::BindOnce(std::move(completion_callback), result));
  }
  RunNextOperationIfNeeded();
}

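// Completion handler for a stream read. Advances the incremental CRC
// bookkeeping when the synchronous entry updated it, invalidates it on error,
// and then runs the common completion path.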
void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    net::CompletionOnceCallback completion_callback,
    std::unique_ptr<SimpleEntryStat> entry_stat,
    std::unique_ptr<SimpleSynchronousEntry::ReadResult> read_result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_result);
  int result = read_result->result;

  if (read_result->crc_updated) {
    if (result > 0) {
      DCHECK_EQ(crc32s_end_offset_[stream_index], offset);
      crc32s_end_offset_[stream_index] += result;
      crc32s_[stream_index] = read_result->updated_crc32;
    }
  }

  if (result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_,
                            net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
                            net::NetLogEventPhase::NONE, result);
  }

  EntryOperationComplete(std::move(completion_callback), *entry_stat, result);
}

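// Completion handler for a stream write. Note that |buf| is not referenced in
// the body; it is presumably bound into this reply only so the IOBuffer stays
// alive until the write has completed.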
void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    net::CompletionOnceCallback completion_callback,
    std::unique_ptr<SimpleEntryStat> entry_stat,
    std::unique_ptr<SimpleSynchronousEntry::WriteResult> write_result,
    net::IOBuffer* buf) {
  int result = write_result->result;
  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_,
                            net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
                            net::NetLogEventPhase::NONE, result);
  }

  if (result < 0)
    crc32s_end_offset_[stream_index] = 0;

  if (result > 0 && write_result->crc_updated) {
    crc32s_end_offset_[stream_index] += result;
    crc32s_[stream_index] = write_result->updated_crc32;
  }

  EntryOperationComplete(std::move(completion_callback), *entry_stat, result);
}

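// Completion handler for a sparse read. Sparse reads only refresh the
// last-used time, so a fresh SimpleEntryStat is built from |last_used| plus
// the cached sizes before running the common completion path.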
void SimpleEntryImpl::ReadSparseOperationComplete(
    net::CompletionOnceCallback completion_callback,
    std::unique_ptr<base::Time> last_used,
    std::unique_ptr<int> result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK(result);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
        net::NetLogEventPhase::NONE, *result);
  }

  SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(std::move(completion_callback), entry_stat, *result);
}

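// Completion handler for a sparse write; the synchronous entry reports the
// updated sizes and times through |entry_stat|.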
void SimpleEntryImpl::WriteSparseOperationComplete(
    net::CompletionOnceCallback completion_callback,
    std::unique_ptr<SimpleEntryStat> entry_stat,
    std::unique_ptr<int> result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK(result);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
        net::NetLogEventPhase::NONE, *result);
  }

  EntryOperationComplete(std::move(completion_callback), *entry_stat, *result);
}

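// Completion handler for the sparse GetAvailableRange operation. Unlike the
// read/write paths this reports a RangeResult rather than a plain int, so it
// posts the callback itself instead of going through EntryOperationComplete().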
void SimpleEntryImpl::GetAvailableRangeOperationComplete(
    RangeResultCallback completion_callback,
    std::unique_ptr<RangeResult> result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  UpdateStateAfterOperationComplete(entry_stat, result->net_error);
  if (!completion_callback.is_null()) {
    base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
        FROM_HERE, base::BindOnce(std::move(completion_callback), *result));
  }
  RunNextOperationIfNeeded();
}

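// Completion handler for DoomEntryInternal(). Restores the requested state,
// records the doom as completed, runs the client callback, notifies the
// backend's post-doom waiting list (if any), and resumes the operation queue.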
void SimpleEntryImpl::DoomOperationComplete(
    net::CompletionOnceCallback callback,
    State state_to_restore,
    int result) {
  state_ = state_to_restore;
  doom_state_ = DOOM_COMPLETED;
  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_END);
  PostClientCallback(std::move(callback), result);
  RunNextOperationIfNeeded();
  if (post_doom_waiting_) {
    post_doom_waiting_->OnDoomComplete(entry_hash_);
    post_doom_waiting_ = nullptr;
  }
}

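// Completion handler for the close operation. By this point the synchronous
// entry has been released; this records the trailer prefetch hint for
// APP_CACHE, resets the entry, and lets any queued operations proceed.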
void SimpleEntryImpl::CloseOperationComplete(
    std::unique_ptr<SimpleEntryCloseResults> in_results) {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_END);
  if (cache_type_ == net::APP_CACHE &&
      in_results->estimated_trailer_prefetch_size > 0 && backend_.get() &&
      backend_->index()) {
    backend_->index()->SetTrailerPrefetchSize(
        entry_hash_, in_results->estimated_trailer_prefetch_size);
  }
  ResetEntry();
  RunNextOperationIfNeeded();
}

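// Copies the sizes and timestamps reported by the synchronous entry into this
// object, and pushes the new on-disk size to the index unless the entry has
// been doomed.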
void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used();
  last_modified_ = entry_stat.last_modified();
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    data_size_[i] = entry_stat.data_size(i);
  }
  sparse_data_size_ = entry_stat.sparse_data_size();

  SimpleBackendImpl* backend_ptr = backend_.get();
  if (doom_state_ == DOOM_NONE && backend_ptr) {
    backend_ptr->index()->UpdateEntrySize(
        entry_hash_, base::checked_cast<uint32_t>(GetDiskUsage()));
  }
}

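// Estimates the entry's on-disk footprint: per-stream file sizes derived from
// the data sizes and key length, plus the sparse data size.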
int64_t SimpleEntryImpl::GetDiskUsage() const {
  int64_t file_size = 0;
  for (int data_size : data_size_) {
    file_size += simple_util::GetFileSizeFromDataSize(key_.size(), data_size);
  }
  file_size += sparse_data_size_;
  return file_size;
}

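// Serves a stream read directly from an in-memory buffer (e.g. stream 0 data
// or prefetched stream 1 data), updating only the last-used time.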
int SimpleEntryImpl::ReadFromBuffer(net::GrowableIOBuffer* in_buf,
                                    int offset,
                                    int buf_len,
                                    net::IOBuffer* out_buf) {
  DCHECK_GE(buf_len, 0);

  memcpy(out_buf->data(), in_buf->data() + offset, buf_len);
  UpdateDataFromEntryStat(SimpleEntryStat(base::Time::Now(), last_modified_,
                                          data_size_, sparse_data_size_));
  return buf_len;
}

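// Applies a write to the in-memory copy of stream 0. The common case is a
// single truncating write at offset 0 (HTTP headers); other offset/truncate
// combinations are handled by growing and zero-filling the buffer as needed.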
int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
                                    int offset,
                                    int buf_len,
                                    bool truncate) {
  // Currently, stream 0 is only used for HTTP headers, and always writes them
  // with a single, truncating write. Detect these writes and record the size
  // changes of the headers. Also, support writes to stream 0 that have
  // different access patterns, as required by the API contract.
  // All other clients of the Simple Cache are encouraged to use stream 1.
  have_written_[0] = true;
  int data_size = GetDataSize(0);
  if (offset == 0 && truncate) {
    stream_0_data_->SetCapacity(buf_len);
    memcpy(stream_0_data_->data(), buf->data(), buf_len);
    data_size_[0] = buf_len;
  } else {
    const int buffer_size =
        truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
    stream_0_data_->SetCapacity(buffer_size);
    // If |stream_0_data_| was extended, the extension up to |offset| needs to
    // be zero-filled.
    const int fill_size = offset <= data_size ? 0 : offset - data_size;
    if (fill_size > 0)
      memset(stream_0_data_->data() + data_size, 0, fill_size);
    if (buf)
      memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
    data_size_[0] = buffer_size;
  }
  RecordHeaderSize(cache_type_, data_size_[0]);
  base::Time modification_time = base::Time::Now();

  // Reset the checksum; SimpleSynchronousEntry::Close will compute it for us,
  // and do it off the source creation sequence.
  crc32s_end_offset_[0] = 0;

  UpdateDataFromEntryStat(
      SimpleEntryStat(modification_time, modification_time, data_size_,
                      sparse_data_size_));
  return buf_len;
}

}  // namespace disk_cache