1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "net/disk_cache/blockfile/entry_impl.h"
6 
7 #include <limits>
8 #include <memory>
9 
10 #include "base/files/file_util.h"
11 #include "base/hash/hash.h"
12 #include "base/numerics/safe_math.h"
13 #include "base/strings/string_util.h"
14 #include "base/time/time.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/net_errors.h"
17 #include "net/disk_cache/blockfile/backend_impl.h"
18 #include "net/disk_cache/blockfile/bitmap.h"
19 #include "net/disk_cache/blockfile/disk_format.h"
20 #include "net/disk_cache/blockfile/histogram_macros.h"
21 #include "net/disk_cache/blockfile/sparse_control.h"
22 #include "net/disk_cache/cache_util.h"
23 #include "net/disk_cache/net_log_parameters.h"
24 #include "net/log/net_log.h"
25 #include "net/log/net_log_event_type.h"
26 #include "net/log/net_log_source_type.h"
27 
28 // Provide a BackendImpl object to macros from histogram_macros.h.
29 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
30 
31 using base::Time;
32 using base::TimeTicks;
33 
34 namespace {
35 
36 // Index for the file used to store the key, if any (files_[kKeyFileIndex]).
37 const int kKeyFileIndex = 3;
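// Streams 0..2 are the entry's data streams; GetKey() below static_asserts
// that kNumStreams == kKeyFileIndex, so the key file slot comes right after them.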
38 
39 // This class implements FileIOCallback to buffer the callback from a file IO
40 // operation from the actual net class.
41 class SyncCallback: public disk_cache::FileIOCallback {
42  public:
43   // |end_event_type| is the event type to log on completion.  Logs nothing on
44   // discard, or when the NetLog is not set to log all events.
45   SyncCallback(scoped_refptr<disk_cache::EntryImpl> entry,
46                net::IOBuffer* buffer,
47                net::CompletionOnceCallback callback,
48                net::NetLogEventType end_event_type)
49       : entry_(std::move(entry)),
50         callback_(std::move(callback)),
51         buf_(buffer),
52         start_(TimeTicks::Now()),
53         end_event_type_(end_event_type) {
54     entry_->IncrementIoCount();
55   }
56 
57   SyncCallback(const SyncCallback&) = delete;
58   SyncCallback& operator=(const SyncCallback&) = delete;
59 
60   ~SyncCallback() override = default;
61 
62   void OnFileIOComplete(int bytes_copied) override;
63   void Discard();
64 
65  private:
66   scoped_refptr<disk_cache::EntryImpl> entry_;
67   net::CompletionOnceCallback callback_;
68   scoped_refptr<net::IOBuffer> buf_;
69   TimeTicks start_;
70   const net::NetLogEventType end_event_type_;
71 };
72 
73 void SyncCallback::OnFileIOComplete(int bytes_copied) {
74   entry_->DecrementIoCount();
75   if (!callback_.is_null()) {
76     if (entry_->net_log().IsCapturing()) {
77       disk_cache::NetLogReadWriteComplete(entry_->net_log(), end_event_type_,
78                                           net::NetLogEventPhase::END,
79                                           bytes_copied);
80     }
81     entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
82     buf_ = nullptr;  // Release the buffer before invoking the callback.
83     std::move(callback_).Run(bytes_copied);
84   }
85   delete this;
86 }
87 
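// Drops the client callback and the buffer, then runs the normal completion
// path so the entry's IO count is balanced and the object deletes itself.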
88 void SyncCallback::Discard() {
89   callback_.Reset();
90   buf_ = nullptr;
91   OnFileIOComplete(0);
92 }
93 
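// Upper bound for a single UserBuffer; PreWrite() may allow up to 20% more
// (kMaxBufferSize * 6 / 5) in its general growth path.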
94 const int kMaxBufferSize = 1024 * 1024;  // 1 MB.
95 
96 }  // namespace
97 
98 namespace disk_cache {
99 
100 // This class handles individual memory buffers that store data before it is
101 // sent to disk. The buffer can start at any offset, but if we try to write to
102 // anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
103 // zero. The buffer grows up to a size determined by the backend, to keep the
104 // total memory used under control.
105 class EntryImpl::UserBuffer {
106  public:
107   explicit UserBuffer(BackendImpl* backend) : backend_(backend->GetWeakPtr()) {
108     buffer_.reserve(kMaxBlockSize);
109   }
110 
111   UserBuffer(const UserBuffer&) = delete;
112   UserBuffer& operator=(const UserBuffer&) = delete;
113 
114   ~UserBuffer() {
115     if (backend_.get())
116       backend_->BufferDeleted(capacity() - kMaxBlockSize);
117   }
118 
119   // Returns true if we can handle writing |len| bytes to |offset|.
120   bool PreWrite(int offset, int len);
121 
122   // Truncates the buffer to |offset| bytes.
123   void Truncate(int offset);
124 
125   // Writes |len| bytes from |buf| at the given |offset|.
126   void Write(int offset, IOBuffer* buf, int len);
127 
128   // Returns true if we can read |len| bytes from |offset|, given that the
129   // actual file has |eof| bytes stored. Note that the number of bytes to read
130   // may be modified by this method even though it returns false: that means we
131   // should do a smaller read from disk.
132   bool PreRead(int eof, int offset, int* len);
133 
134   // Reads up to |len| bytes into |buf| from the given |offset|.
135   int Read(int offset, IOBuffer* buf, int len);
136 
137   // Prepare this buffer for reuse.
138   void Reset();
139 
140   char* Data() { return buffer_.data(); }
141   int Size() { return static_cast<int>(buffer_.size()); }
142   int Start() { return offset_; }
143   int End() { return offset_ + Size(); }
144 
145  private:
146   int capacity() { return static_cast<int>(buffer_.capacity()); }
147   bool GrowBuffer(int required, int limit);
148 
149   base::WeakPtr<BackendImpl> backend_;
150   int offset_ = 0;
151   std::vector<char> buffer_;
152   bool grow_allowed_ = true;
153 };
154 
155 bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
156   DCHECK_GE(offset, 0);
157   DCHECK_GE(len, 0);
158   DCHECK_GE(offset + len, 0);
159 
160   // We don't want to write before our current start.
161   if (offset < offset_)
162     return false;
163 
164   // Let's get the common case out of the way.
165   if (offset + len <= capacity())
166     return true;
167 
168   // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
169   // buffer offset_ at 0.
170   if (!Size() && offset > kMaxBlockSize)
171     return GrowBuffer(len, kMaxBufferSize);
172 
173   int required = offset - offset_ + len;
174   return GrowBuffer(required, kMaxBufferSize * 6 / 5);
175 }
176 
177 void EntryImpl::UserBuffer::Truncate(int offset) {
178   DCHECK_GE(offset, 0);
179   DCHECK_GE(offset, offset_);
180   DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;
181 
182   offset -= offset_;
183   if (Size() >= offset)
184     buffer_.resize(offset);
185 }
186 
187 void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
188   DCHECK_GE(offset, 0);
189   DCHECK_GE(len, 0);
190   DCHECK_GE(offset + len, 0);
191 
192   // 0-length writes that don't extend can just be ignored here, and are safe
193   // even if they are before offset_, as truncates are handled elsewhere.
194   if (len == 0 && offset < End())
195     return;
196 
197   DCHECK_GE(offset, offset_);
198   DVLOG(3) << "Buffer write at " << offset << " current " << offset_;
199 
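  // An empty buffer receiving data past the first 16 KB "floats": it gets
  // anchored at the write offset rather than at 0 (see the class comment).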
200   if (!Size() && offset > kMaxBlockSize)
201     offset_ = offset;
202 
203   offset -= offset_;
204 
205   if (offset > Size())
206     buffer_.resize(offset);
207 
208   if (!len)
209     return;
210 
211   char* buffer = buf->data();
212   int valid_len = Size() - offset;
213   int copy_len = std::min(valid_len, len);
214   if (copy_len) {
215     memcpy(&buffer_[offset], buffer, copy_len);
216     len -= copy_len;
217     buffer += copy_len;
218   }
219   if (!len)
220     return;
221 
222   buffer_.insert(buffer_.end(), buffer, buffer + len);
223 }
224 
225 bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
226   DCHECK_GE(offset, 0);
227   DCHECK_GT(*len, 0);
228 
229   if (offset < offset_) {
230     // We are reading before this buffer.
231     if (offset >= eof)
232       return true;
233 
234     // If the read overlaps with the buffer, change its length so that there is
235     // no overlap.
236     *len = std::min(*len, offset_ - offset);
237     *len = std::min(*len, eof - offset);
238 
239     // We should read from disk.
240     return false;
241   }
242 
243   if (!Size())
244     return false;
245 
246   // See if we can fulfill the first part of the operation.
247   return (offset - offset_ < Size());
248 }
249 
250 int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
251   DCHECK_GE(offset, 0);
252   DCHECK_GT(len, 0);
253   DCHECK(Size() || offset < offset_);
254 
255   int clean_bytes = 0;
256   if (offset < offset_) {
257     // We don't have a file so let's fill the first part with 0.
258     clean_bytes = std::min(offset_ - offset, len);
259     memset(buf->data(), 0, clean_bytes);
260     if (len == clean_bytes)
261       return len;
262     offset = offset_;
263     len -= clean_bytes;
264   }
265 
266   int start = offset - offset_;
267   int available = Size() - start;
268   DCHECK_GE(start, 0);
269   DCHECK_GE(available, 0);
270   len = std::min(len, available);
271   memcpy(buf->data() + clean_bytes, &buffer_[start], len);
272   return len + clean_bytes;
273 }
274 
275 void EntryImpl::UserBuffer::Reset() {
276   if (!grow_allowed_) {
277     if (backend_.get())
278       backend_->BufferDeleted(capacity() - kMaxBlockSize);
279     grow_allowed_ = true;
280     std::vector<char> tmp;
281     buffer_.swap(tmp);
282     buffer_.reserve(kMaxBlockSize);
283   }
284   offset_ = 0;
285   buffer_.clear();
286 }
287 
288 bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
289   DCHECK_GE(required, 0);
290   int current_size = capacity();
291   if (required <= current_size)
292     return true;
293 
294   if (required > limit)
295     return false;
296 
297   if (!backend_.get())
298     return false;
299 
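  // Growth policy: add at least four blocks' worth of space (kMaxBlockSize * 4)
  // and at least double the current capacity, but never exceed |limit|.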
300   int to_add = std::max(required - current_size, kMaxBlockSize * 4);
301   to_add = std::max(current_size, to_add);
302   required = std::min(current_size + to_add, limit);
303 
304   grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
305   if (!grow_allowed_)
306     return false;
307 
308   DVLOG(3) << "Buffer grow to " << required;
309 
310   buffer_.reserve(required);
311   return true;
312 }
313 
314 // ------------------------------------------------------------------------
315 
316 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
317     : entry_(nullptr, Addr(0)),
318       node_(nullptr, Addr(0)),
319       backend_(backend->GetWeakPtr()),
320       read_only_(read_only) {
321   entry_.LazyInit(backend->File(address), address);
322 }
323 
324 void EntryImpl::DoomImpl() {
325   if (doomed_ || !backend_.get())
326     return;
327 
328   SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
329   backend_->InternalDoomEntry(this);
330 }
331 
332 int EntryImpl::ReadDataImpl(int index,
333                             int offset,
334                             IOBuffer* buf,
335                             int buf_len,
336                             CompletionOnceCallback callback) {
337   if (net_log_.IsCapturing()) {
338     NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
339                         net::NetLogEventPhase::BEGIN, index, offset, buf_len,
340                         false);
341   }
342 
343   int result =
344       InternalReadData(index, offset, buf, buf_len, std::move(callback));
345 
346   if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
347     NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
348                             net::NetLogEventPhase::END, result);
349   }
350   return result;
351 }
352 
353 int EntryImpl::WriteDataImpl(int index,
354                              int offset,
355                              IOBuffer* buf,
356                              int buf_len,
357                              CompletionOnceCallback callback,
358                              bool truncate) {
359   if (net_log_.IsCapturing()) {
360     NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
361                         net::NetLogEventPhase::BEGIN, index, offset, buf_len,
362                         truncate);
363   }
364 
365   int result = InternalWriteData(index, offset, buf, buf_len,
366                                  std::move(callback), truncate);
367 
368   if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
369     NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
370                             net::NetLogEventPhase::END, result);
371   }
372   return result;
373 }
374 
375 int EntryImpl::ReadSparseDataImpl(int64_t offset,
376                                   IOBuffer* buf,
377                                   int buf_len,
378                                   CompletionOnceCallback callback) {
379   DCHECK(node_.Data()->dirty || read_only_);
380   int result = InitSparseData();
381   if (net::OK != result)
382     return result;
383 
384   TimeTicks start = TimeTicks::Now();
385   result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
386                             std::move(callback));
387   ReportIOTime(kSparseRead, start);
388   return result;
389 }
390 
391 int EntryImpl::WriteSparseDataImpl(int64_t offset,
392                                    IOBuffer* buf,
393                                    int buf_len,
394                                    CompletionOnceCallback callback) {
395   DCHECK(node_.Data()->dirty || read_only_);
396   int result = InitSparseData();
397   if (net::OK != result)
398     return result;
399 
400   TimeTicks start = TimeTicks::Now();
401   result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
402                             buf_len, std::move(callback));
403   ReportIOTime(kSparseWrite, start);
404   return result;
405 }
406 
407 RangeResult EntryImpl::GetAvailableRangeImpl(int64_t offset, int len) {
408   int result = InitSparseData();
409   if (net::OK != result)
410     return RangeResult(static_cast<net::Error>(result));
411 
412   return sparse_->GetAvailableRange(offset, len);
413 }
414 
415 void EntryImpl::CancelSparseIOImpl() {
416   if (!sparse_.get())
417     return;
418 
419   sparse_->CancelIO();
420 }
421 
422 int EntryImpl::ReadyForSparseIOImpl(CompletionOnceCallback callback) {
423   DCHECK(sparse_.get());
424   return sparse_->ReadyToUse(std::move(callback));
425 }
426 
427 uint32_t EntryImpl::GetHash() {
428   return entry_.Data()->hash;
429 }
430 
431 bool EntryImpl::CreateEntry(Addr node_address,
432                             const std::string& key,
433                             uint32_t hash) {
434   EntryStore* entry_store = entry_.Data();
435   RankingsNode* node = node_.Data();
436   memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
437   memset(node, 0, sizeof(RankingsNode));
438   if (!node_.LazyInit(backend_->File(node_address), node_address))
439     return false;
440 
441   entry_store->rankings_node = node_address.value();
442   node->contents = entry_.address().value();
443 
444   entry_store->hash = hash;
445   entry_store->creation_time = Time::Now().ToInternalValue();
446   entry_store->key_len = static_cast<int32_t>(key.size());
447   if (entry_store->key_len > kMaxInternalKeyLength) {
448     Addr address(0);
449     if (!CreateBlock(entry_store->key_len + 1, &address))
450       return false;
451 
452     entry_store->long_key = address.value();
453     File* key_file = GetBackingFile(address, kKeyFileIndex);
454     key_ = key;
455 
456     size_t offset = 0;
457     if (address.is_block_file())
458       offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
459 
460     if (!key_file || !key_file->Write(key.data(), key.size() + 1, offset)) {
461       DeleteData(address, kKeyFileIndex);
462       return false;
463     }
464 
465     if (address.is_separate_file())
466       key_file->SetLength(key.size() + 1);
467   } else {
468     memcpy(entry_store->key, key.data(), key.size());
469     entry_store->key[key.size()] = '\0';
470   }
471   backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size()));
472   CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32_t>(key.size()));
473   node->dirty = backend_->GetCurrentEntryId();
474   return true;
475 }
476 
477 bool EntryImpl::IsSameEntry(const std::string& key, uint32_t hash) {
478   if (entry_.Data()->hash != hash ||
479       static_cast<size_t>(entry_.Data()->key_len) != key.size())
480     return false;
481 
482   return (key.compare(GetKey()) == 0);
483 }
484 
485 void EntryImpl::InternalDoom() {
486   net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
487   DCHECK(node_.HasData());
488   if (!node_.Data()->dirty) {
489     node_.Data()->dirty = backend_->GetCurrentEntryId();
490     node_.Store();
491   }
492   doomed_ = true;
493 }
494 
495 void EntryImpl::DeleteEntryData(bool everything) {
496   DCHECK(doomed_ || !everything);
497 
498   if (GetEntryFlags() & PARENT_ENTRY) {
499     // We have some child entries that must go away.
500     SparseControl::DeleteChildren(this);
501   }
502 
503   if (GetDataSize(0))
504     CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
505   if (GetDataSize(1))
506     CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
507   for (int index = 0; index < kNumStreams; index++) {
508     Addr address(entry_.Data()->data_addr[index]);
509     if (address.is_initialized()) {
510       backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
511                                       unreported_size_[index], 0);
512       entry_.Data()->data_addr[index] = 0;
513       entry_.Data()->data_size[index] = 0;
514       entry_.Store();
515       DeleteData(address, index);
516     }
517   }
518 
519   if (!everything)
520     return;
521 
522   // Remove all traces of this entry.
523   backend_->RemoveEntry(this);
524 
525   // Note that at this point node_ and entry_ are just two blocks of data, and
526   // even if they reference each other, nobody should be referencing them.
527 
528   Addr address(entry_.Data()->long_key);
529   DeleteData(address, kKeyFileIndex);
530   backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
531 
532   backend_->DeleteBlock(entry_.address(), true);
533   entry_.Discard();
534 
535   if (!LeaveRankingsBehind()) {
536     backend_->DeleteBlock(node_.address(), true);
537     node_.Discard();
538   }
539 }
540 
541 CacheAddr EntryImpl::GetNextAddress() {
542   return entry_.Data()->next;
543 }
544 
545 void EntryImpl::SetNextAddress(Addr address) {
546   DCHECK_NE(address.value(), entry_.address().value());
547   entry_.Data()->next = address.value();
548   bool success = entry_.Store();
549   DCHECK(success);
550 }
551 
552 bool EntryImpl::LoadNodeAddress() {
553   Addr address(entry_.Data()->rankings_node);
554   if (!node_.LazyInit(backend_->File(address), address))
555     return false;
556   return node_.Load();
557 }
558 
559 bool EntryImpl::Update() {
560   DCHECK(node_.HasData());
561 
562   if (read_only_)
563     return true;
564 
565   RankingsNode* rankings = node_.Data();
566   if (!rankings->dirty) {
567     rankings->dirty = backend_->GetCurrentEntryId();
568     if (!node_.Store())
569       return false;
570   }
571   return true;
572 }
573 
574 void EntryImpl::SetDirtyFlag(int32_t current_id) {
575   DCHECK(node_.HasData());
576   if (node_.Data()->dirty && current_id != node_.Data()->dirty)
577     dirty_ = true;
578 
579   if (!current_id)
580     dirty_ = true;
581 }
582 
583 void EntryImpl::SetPointerForInvalidEntry(int32_t new_id) {
584   node_.Data()->dirty = new_id;
585   node_.Store();
586 }
587 
588 bool EntryImpl::LeaveRankingsBehind() {
589   return !node_.Data()->contents;
590 }
591 
592 // This only includes checks that relate to the first block of the entry (the
593 // first 256 bytes), and values that should be set from the entry creation.
594 // Basically, even if there is something wrong with this entry, we want to see
595 // if it is possible to load the rankings node and delete them together.
596 bool EntryImpl::SanityCheck() {
597   if (!entry_.VerifyHash())
598     return false;
599 
600   EntryStore* stored = entry_.Data();
601   if (!stored->rankings_node || stored->key_len <= 0)
602     return false;
603 
604   if (stored->reuse_count < 0 || stored->refetch_count < 0)
605     return false;
606 
607   Addr rankings_addr(stored->rankings_node);
608   if (!rankings_addr.SanityCheckForRankings())
609     return false;
610 
611   Addr next_addr(stored->next);
612   if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
613     STRESS_NOTREACHED();
614     return false;
615   }
616   STRESS_DCHECK(next_addr.value() != entry_.address().value());
617 
618   if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
619     return false;
620 
621   Addr key_addr(stored->long_key);
622   if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
623       (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
624     return false;
625 
626   if (!key_addr.SanityCheck())
627     return false;
628 
629   if (key_addr.is_initialized() &&
630       ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
631        (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
632     return false;
633 
634   int num_blocks = NumBlocksForEntry(stored->key_len);
635   if (entry_.address().num_blocks() != num_blocks)
636     return false;
637 
638   return true;
639 }
640 
641 bool EntryImpl::DataSanityCheck() {
642   EntryStore* stored = entry_.Data();
643   Addr key_addr(stored->long_key);
644 
645   // The key must be NULL terminated.
646   if (!key_addr.is_initialized() && stored->key[stored->key_len])
647     return false;
648 
649   if (stored->hash != base::PersistentHash(GetKey()))
650     return false;
651 
652   for (int i = 0; i < kNumStreams; i++) {
653     Addr data_addr(stored->data_addr[i]);
654     int data_size = stored->data_size[i];
655     if (data_size < 0)
656       return false;
657     if (!data_size && data_addr.is_initialized())
658       return false;
659     if (!data_addr.SanityCheck())
660       return false;
661     if (!data_size)
662       continue;
663     if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
664       return false;
665     if (data_size > kMaxBlockSize && data_addr.is_block_file())
666       return false;
667   }
668   return true;
669 }
670 
671 void EntryImpl::FixForDelete() {
672   EntryStore* stored = entry_.Data();
673   Addr key_addr(stored->long_key);
674 
675   if (!key_addr.is_initialized())
676     stored->key[stored->key_len] = '\0';
677 
678   for (int i = 0; i < kNumStreams; i++) {
679     Addr data_addr(stored->data_addr[i]);
680     int data_size = stored->data_size[i];
681     if (data_addr.is_initialized()) {
682       if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
683           (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
684           !data_addr.SanityCheck()) {
685         STRESS_NOTREACHED();
686         // The address is weird so don't attempt to delete it.
687         stored->data_addr[i] = 0;
688         // In general, trust the stored size as it should be in sync with the
689         // total size tracked by the backend.
690       }
691     }
692     if (data_size < 0)
693       stored->data_size[i] = 0;
694   }
695   entry_.Store();
696 }
697 
698 void EntryImpl::IncrementIoCount() {
699   backend_->IncrementIoCount();
700 }
701 
702 void EntryImpl::DecrementIoCount() {
703   if (backend_.get())
704     backend_->DecrementIoCount();
705 }
706 
707 void EntryImpl::OnEntryCreated(BackendImpl* backend) {
708   // Just grab a reference to the background queue.
709   background_queue_ = backend->GetBackgroundQueue();
710 }
711 
712 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
713   node_.Data()->last_used = last_used.ToInternalValue();
714   node_.Data()->last_modified = last_modified.ToInternalValue();
715   node_.set_modified();
716 }
717 
718 void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
719   if (!backend_.get())
720     return;
721 
722   switch (op) {
723     case kRead:
724       CACHE_UMA(AGE_MS, "ReadTime", 0, start);
725       break;
726     case kWrite:
727       CACHE_UMA(AGE_MS, "WriteTime", 0, start);
728       break;
729     case kSparseRead:
730       CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
731       break;
732     case kSparseWrite:
733       CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
734       break;
735     case kAsyncIO:
736       CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
737       break;
738     case kReadAsync1:
739       CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
740       break;
741     case kWriteAsync1:
742       CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
743       break;
744     default:
745       NOTREACHED();
746   }
747 }
748 
749 void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
750   DCHECK(!net_log_.net_log());
751   net_log_ = net::NetLogWithSource::Make(
752       net_log, net::NetLogSourceType::DISK_CACHE_ENTRY);
753   net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL, [&] {
754     return CreateNetLogParametersEntryCreationParams(this, created);
755   });
756 }
757 
758 const net::NetLogWithSource& EntryImpl::net_log() const {
759   return net_log_;
760 }
761 
762 // static
763 int EntryImpl::NumBlocksForEntry(int key_size) {
764   // The longest key that can be stored using one block.
765   int key1_len =
766       static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));
767 
768   if (key_size < key1_len || key_size > kMaxInternalKeyLength)
769     return 1;
770 
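  // Beyond the first block, each additional 256-byte block stores another 256
  // bytes of key; the +2 covers the first block plus room for the trailing '\0'.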
771   return ((key_size - key1_len) / 256 + 2);
772 }
773 
774 // ------------------------------------------------------------------------
775 
776 void EntryImpl::Doom() {
777   if (background_queue_.get())
778     background_queue_->DoomEntryImpl(this);
779 }
780 
781 void EntryImpl::Close() {
782   if (background_queue_.get())
783     background_queue_->CloseEntryImpl(this);
784 }
785 
786 std::string EntryImpl::GetKey() const {
787   CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
788   int key_len = entry->Data()->key_len;
789   if (key_len <= kMaxInternalKeyLength)
790     return std::string(entry->Data()->key, key_len);
791 
792   // We keep a copy of the key so that we can always return it, even if the
793   // backend is disabled.
794   if (!key_.empty())
795     return key_;
796 
797   Addr address(entry->Data()->long_key);
798   DCHECK(address.is_initialized());
799   size_t offset = 0;
800   if (address.is_block_file())
801     offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
802 
803   static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
804   File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
805                                                                 kKeyFileIndex);
806   if (!key_file)
807     return std::string();
808 
809   ++key_len;  // We store a trailing \0 on disk.
810   if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
811     return std::string();
812 
813   // WriteInto will ensure that key_.length() == key_len - 1, and so
814   // key_.c_str()[key_len - 1] will be '\0'. Taking advantage of this, do not
815   // attempt to read up to the expected on-disk '\0' --- which would be
816   // |key_len| bytes total --- because if, due to a corrupt file, it isn't
817   // there, |key_| would get its internal nul messed up.
818   if (!key_file->Read(base::WriteInto(&key_, key_len), key_len - 1, offset))
819     key_.clear();
820   DCHECK_LE(strlen(key_.data()), static_cast<size_t>(key_len));
821   return key_;
822 }
823 
824 Time EntryImpl::GetLastUsed() const {
825   CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
826   return Time::FromInternalValue(node->Data()->last_used);
827 }
828 
829 Time EntryImpl::GetLastModified() const {
830   CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
831   return Time::FromInternalValue(node->Data()->last_modified);
832 }
833 
834 int32_t EntryImpl::GetDataSize(int index) const {
835   if (index < 0 || index >= kNumStreams)
836     return 0;
837 
838   CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
839   return entry->Data()->data_size[index];
840 }
841 
842 int EntryImpl::ReadData(int index,
843                         int offset,
844                         IOBuffer* buf,
845                         int buf_len,
846                         CompletionOnceCallback callback) {
847   if (callback.is_null())
848     return ReadDataImpl(index, offset, buf, buf_len, std::move(callback));
849 
850   DCHECK(node_.Data()->dirty || read_only_);
851   if (index < 0 || index >= kNumStreams)
852     return net::ERR_INVALID_ARGUMENT;
853 
854   int entry_size = entry_.Data()->data_size[index];
855   if (offset >= entry_size || offset < 0 || !buf_len)
856     return 0;
857 
858   if (buf_len < 0)
859     return net::ERR_INVALID_ARGUMENT;
860 
861   if (!background_queue_.get())
862     return net::ERR_UNEXPECTED;
863 
864   background_queue_->ReadData(this, index, offset, buf, buf_len,
865                               std::move(callback));
866   return net::ERR_IO_PENDING;
867 }
868 
869 int EntryImpl::WriteData(int index,
870                          int offset,
871                          IOBuffer* buf,
872                          int buf_len,
873                          CompletionOnceCallback callback,
874                          bool truncate) {
875   if (callback.is_null()) {
876     return WriteDataImpl(index, offset, buf, buf_len, std::move(callback),
877                          truncate);
878   }
879 
880   DCHECK(node_.Data()->dirty || read_only_);
881   if (index < 0 || index >= kNumStreams)
882     return net::ERR_INVALID_ARGUMENT;
883 
884   if (offset < 0 || buf_len < 0)
885     return net::ERR_INVALID_ARGUMENT;
886 
887   if (!background_queue_.get())
888     return net::ERR_UNEXPECTED;
889 
890   background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
891                                std::move(callback));
892   return net::ERR_IO_PENDING;
893 }
894 
895 int EntryImpl::ReadSparseData(int64_t offset,
896                               IOBuffer* buf,
897                               int buf_len,
898                               CompletionOnceCallback callback) {
899   if (callback.is_null())
900     return ReadSparseDataImpl(offset, buf, buf_len, std::move(callback));
901 
902   if (!background_queue_.get())
903     return net::ERR_UNEXPECTED;
904 
905   background_queue_->ReadSparseData(this, offset, buf, buf_len,
906                                     std::move(callback));
907   return net::ERR_IO_PENDING;
908 }
909 
910 int EntryImpl::WriteSparseData(int64_t offset,
911                                IOBuffer* buf,
912                                int buf_len,
913                                CompletionOnceCallback callback) {
914   if (callback.is_null())
915     return WriteSparseDataImpl(offset, buf, buf_len, std::move(callback));
916 
917   if (!background_queue_.get())
918     return net::ERR_UNEXPECTED;
919 
920   background_queue_->WriteSparseData(this, offset, buf, buf_len,
921                                      std::move(callback));
922   return net::ERR_IO_PENDING;
923 }
924 
925 RangeResult EntryImpl::GetAvailableRange(int64_t offset,
926                                          int len,
927                                          RangeResultCallback callback) {
928   if (!background_queue_.get())
929     return RangeResult(net::ERR_UNEXPECTED);
930 
931   background_queue_->GetAvailableRange(this, offset, len, std::move(callback));
932   return RangeResult(net::ERR_IO_PENDING);
933 }
934 
935 bool EntryImpl::CouldBeSparse() const {
936   if (sparse_.get())
937     return true;
938 
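  // A throwaway SparseControl appears to be enough here: it is only queried,
  // never Init()-ed or stored.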
939   auto sparse = std::make_unique<SparseControl>(const_cast<EntryImpl*>(this));
940   return sparse->CouldBeSparse();
941 }
942 
943 void EntryImpl::CancelSparseIO() {
944   if (background_queue_.get())
945     background_queue_->CancelSparseIO(this);
946 }
947 
948 net::Error EntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
949   if (!sparse_.get())
950     return net::OK;
951 
952   if (!background_queue_.get())
953     return net::ERR_UNEXPECTED;
954 
955   background_queue_->ReadyForSparseIO(this, std::move(callback));
956   return net::ERR_IO_PENDING;
957 }
958 
959 void EntryImpl::SetLastUsedTimeForTest(base::Time time) {
960   SetTimes(time, time);
961 }
962 
963 // When an entry is deleted from the cache, we clean up all the data associated
964 // with it for two reasons: to simplify the reuse of the block (we know that any
965 // unused block is filled with zeros), and to simplify the handling of write /
966 // read partial information from an entry (don't have to worry about returning
967 // data related to a previous cache entry because the range was not fully
968 // written before).
969 EntryImpl::~EntryImpl() {
970   if (!backend_.get()) {
971     entry_.clear_modified();
972     node_.clear_modified();
973     return;
974   }
975 
976   // Save the sparse info to disk. This will generate IO for this entry and
977   // maybe for a child entry, so it is important to do it before deleting this
978   // entry.
979   sparse_.reset();
980 
981   // Remove this entry from the list of open entries.
982   backend_->OnEntryDestroyBegin(entry_.address());
983 
984   if (doomed_) {
985     DeleteEntryData(true);
986   } else {
987 #if defined(NET_BUILD_STRESS_CACHE)
988     SanityCheck();
989 #endif
990     net_log_.AddEvent(net::NetLogEventType::ENTRY_CLOSE);
991     bool ret = true;
992     for (int index = 0; index < kNumStreams; index++) {
993       if (user_buffers_[index].get()) {
994         ret = Flush(index, 0);
995         if (!ret)
996           LOG(ERROR) << "Failed to save user data";
997       }
998       if (unreported_size_[index]) {
999         backend_->ModifyStorageSize(
1000             entry_.Data()->data_size[index] - unreported_size_[index],
1001             entry_.Data()->data_size[index]);
1002       }
1003     }
1004 
1005     if (!ret) {
1006       // There was a failure writing the actual data. Mark the entry as dirty.
1007       int current_id = backend_->GetCurrentEntryId();
1008       node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
1009       node_.Store();
1010     } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
1011       node_.Data()->dirty = 0;
1012       node_.Store();
1013     }
1014   }
1015 
1016   net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL);
1017   backend_->OnEntryDestroyEnd();
1018 }
1019 
1020 // ------------------------------------------------------------------------
1021 
1022 int EntryImpl::InternalReadData(int index,
1023                                 int offset,
1024                                 IOBuffer* buf,
1025                                 int buf_len,
1026                                 CompletionOnceCallback callback) {
1027   DCHECK(node_.Data()->dirty || read_only_);
1028   DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
1029   if (index < 0 || index >= kNumStreams)
1030     return net::ERR_INVALID_ARGUMENT;
1031 
1032   int entry_size = entry_.Data()->data_size[index];
1033   if (offset >= entry_size || offset < 0 || !buf_len)
1034     return 0;
1035 
1036   if (buf_len < 0)
1037     return net::ERR_INVALID_ARGUMENT;
1038 
1039   if (!backend_.get())
1040     return net::ERR_UNEXPECTED;
1041 
1042   TimeTicks start = TimeTicks::Now();
1043 
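  // Clamp the read so it does not run past the end of the stream; the
  // offset + length sum is overflow-checked before the comparison.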
1044   int end_offset;
1045   if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
1046       end_offset > entry_size)
1047     buf_len = entry_size - offset;
1048 
1049   UpdateRank(false);
1050 
1051   backend_->OnEvent(Stats::READ_DATA);
1052   backend_->OnRead(buf_len);
1053 
1054   Addr address(entry_.Data()->data_addr[index]);
1055   int eof = address.is_initialized() ? entry_size : 0;
1056   if (user_buffers_[index].get() &&
1057       user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
1058     // Complete the operation locally.
1059     buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
1060     ReportIOTime(kRead, start);
1061     return buf_len;
1062   }
1063 
1064   address.set_value(entry_.Data()->data_addr[index]);
1065   if (!address.is_initialized()) {
1066     DoomImpl();
1067     return net::ERR_FAILED;
1068   }
1069 
1070   File* file = GetBackingFile(address, index);
1071   if (!file) {
1072     DoomImpl();
1073     LOG(ERROR) << "No file for " << std::hex << address.value();
1074     return net::ERR_FILE_NOT_FOUND;
1075   }
1076 
1077   size_t file_offset = offset;
1078   if (address.is_block_file()) {
1079     DCHECK_LE(offset + buf_len, kMaxBlockSize);
1080     file_offset += address.start_block() * address.BlockSize() +
1081                    kBlockHeaderSize;
1082   }
1083 
1084   SyncCallback* io_callback = nullptr;
1085   bool null_callback = callback.is_null();
1086   if (!null_callback) {
1087     io_callback =
1088         new SyncCallback(base::WrapRefCounted(this), buf, std::move(callback),
1089                          net::NetLogEventType::ENTRY_READ_DATA);
1090   }
1091 
1092   TimeTicks start_async = TimeTicks::Now();
1093 
1094   bool completed;
1095   if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
1096     if (io_callback)
1097       io_callback->Discard();
1098     DoomImpl();
1099     return net::ERR_CACHE_READ_FAILURE;
1100   }
1101 
1102   if (io_callback && completed)
1103     io_callback->Discard();
1104 
1105   if (io_callback)
1106     ReportIOTime(kReadAsync1, start_async);
1107 
1108   ReportIOTime(kRead, start);
1109   return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
1110 }
1111 
1112 int EntryImpl::InternalWriteData(int index,
1113                                  int offset,
1114                                  IOBuffer* buf,
1115                                  int buf_len,
1116                                  CompletionOnceCallback callback,
1117                                  bool truncate) {
1118   DCHECK(node_.Data()->dirty || read_only_);
1119   DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
1120   if (index < 0 || index >= kNumStreams)
1121     return net::ERR_INVALID_ARGUMENT;
1122 
1123   if (offset < 0 || buf_len < 0)
1124     return net::ERR_INVALID_ARGUMENT;
1125 
1126   if (!backend_.get())
1127     return net::ERR_UNEXPECTED;
1128 
1129   int max_file_size = backend_->MaxFileSize();
1130 
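  // Reject writes whose end would exceed the backend's per-file limit; the
  // offset + length sum is overflow-checked before the comparison.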
1131   int end_offset;
1132   if (offset > max_file_size || buf_len > max_file_size ||
1133       !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
1134       end_offset > max_file_size) {
1135     int size = base::CheckAdd(offset, buf_len)
1136                    .ValueOrDefault(std::numeric_limits<int32_t>::max());
1137     backend_->TooMuchStorageRequested(size);
1138     return net::ERR_FAILED;
1139   }
1140 
1141   TimeTicks start = TimeTicks::Now();
1142 
1143   // Read the size at this point (it may change inside prepare).
1144   int entry_size = entry_.Data()->data_size[index];
1145   bool extending = entry_size < offset + buf_len;
1146   truncate = truncate && entry_size > offset + buf_len;
1147   if (!PrepareTarget(index, offset, buf_len, truncate))
1148     return net::ERR_FAILED;
1149 
1150   if (extending || truncate)
1151     UpdateSize(index, entry_size, offset + buf_len);
1152 
1153   UpdateRank(true);
1154 
1155   backend_->OnEvent(Stats::WRITE_DATA);
1156   backend_->OnWrite(buf_len);
1157 
1158   if (user_buffers_[index].get()) {
1159     // Complete the operation locally.
1160     user_buffers_[index]->Write(offset, buf, buf_len);
1161     ReportIOTime(kWrite, start);
1162     return buf_len;
1163   }
1164 
1165   Addr address(entry_.Data()->data_addr[index]);
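  // offset + buf_len == 0 means both are zero, so there is nothing to write;
  // if this was a truncation, PrepareTarget() already released the old data.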
1166   if (offset + buf_len == 0) {
1167     if (truncate) {
1168       DCHECK(!address.is_initialized());
1169     }
1170     return 0;
1171   }
1172 
1173   File* file = GetBackingFile(address, index);
1174   if (!file)
1175     return net::ERR_FILE_NOT_FOUND;
1176 
1177   size_t file_offset = offset;
1178   if (address.is_block_file()) {
1179     DCHECK_LE(offset + buf_len, kMaxBlockSize);
1180     file_offset += address.start_block() * address.BlockSize() +
1181                    kBlockHeaderSize;
1182   } else if (truncate || (extending && !buf_len)) {
1183     if (!file->SetLength(offset + buf_len))
1184       return net::ERR_FAILED;
1185   }
1186 
1187   if (!buf_len)
1188     return 0;
1189 
1190   SyncCallback* io_callback = nullptr;
1191   bool null_callback = callback.is_null();
1192   if (!null_callback) {
1193     io_callback = new SyncCallback(this, buf, std::move(callback),
1194                                    net::NetLogEventType::ENTRY_WRITE_DATA);
1195   }
1196 
1197   TimeTicks start_async = TimeTicks::Now();
1198 
1199   bool completed;
1200   if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
1201                    &completed)) {
1202     if (io_callback)
1203       io_callback->Discard();
1204     return net::ERR_CACHE_WRITE_FAILURE;
1205   }
1206 
1207   if (io_callback && completed)
1208     io_callback->Discard();
1209 
1210   if (io_callback)
1211     ReportIOTime(kWriteAsync1, start_async);
1212 
1213   ReportIOTime(kWrite, start);
1214   return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
1215 }
1216 
1217 // ------------------------------------------------------------------------
1218 
1219 bool EntryImpl::CreateDataBlock(int index, int size) {
1220   DCHECK(index >= 0 && index < kNumStreams);
1221 
1222   Addr address(entry_.Data()->data_addr[index]);
1223   if (!CreateBlock(size, &address))
1224     return false;
1225 
1226   entry_.Data()->data_addr[index] = address.value();
1227   entry_.Store();
1228   return true;
1229 }
1230 
1231 bool EntryImpl::CreateBlock(int size, Addr* address) {
1232   DCHECK(!address->is_initialized());
1233   if (!backend_.get())
1234     return false;
1235 
1236   FileType file_type = Addr::RequiredFileType(size);
1237   if (EXTERNAL == file_type) {
1238     if (size > backend_->MaxFileSize())
1239       return false;
1240     if (!backend_->CreateExternalFile(address))
1241       return false;
1242   } else {
1243     int num_blocks = Addr::RequiredBlocks(size, file_type);
1244 
1245     if (!backend_->CreateBlock(file_type, num_blocks, address))
1246       return false;
1247   }
1248   return true;
1249 }
1250 
1251 // Note that this method may end up modifying a block file so upon return the
1252 // involved block will be free, and could be reused for something else. If there
1253 // is a crash after that point (and maybe before returning to the caller), the
1254 // entry will be left dirty... and at some point it will be discarded; it is
1255 // important that the entry doesn't keep a reference to this address, or we'll
1256 // end up deleting the contents of |address| once again.
1257 void EntryImpl::DeleteData(Addr address, int index) {
1258   DCHECK(backend_.get());
1259   if (!address.is_initialized())
1260     return;
1261   if (address.is_separate_file()) {
1262     int failure = !base::DeleteFile(backend_->GetFileName(address));
1263     CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
1264     if (failure) {
1265       LOG(ERROR) << "Failed to delete " <<
1266           backend_->GetFileName(address).value() << " from the cache.";
1267     }
1268     if (files_[index].get())
1269       files_[index] = nullptr;  // Releases the object.
1270   } else {
1271     backend_->DeleteBlock(address, true);
1272   }
1273 }
1274 
1275 void EntryImpl::UpdateRank(bool modified) {
1276   if (!backend_.get())
1277     return;
1278 
1279   if (!doomed_) {
1280     // Everything is handled by the backend.
1281     backend_->UpdateRank(this, modified);
1282     return;
1283   }
1284 
1285   Time current = Time::Now();
1286   node_.Data()->last_used = current.ToInternalValue();
1287 
1288   if (modified)
1289     node_.Data()->last_modified = current.ToInternalValue();
1290 }
1291 
1292 File* EntryImpl::GetBackingFile(Addr address, int index) {
1293   if (!backend_.get())
1294     return nullptr;
1295 
1296   File* file;
1297   if (address.is_separate_file())
1298     file = GetExternalFile(address, index);
1299   else
1300     file = backend_->File(address);
1301   return file;
1302 }
1303 
1304 File* EntryImpl::GetExternalFile(Addr address, int index) {
1305   DCHECK(index >= 0 && index <= kKeyFileIndex);
1306   if (!files_[index].get()) {
1307     // For a key file, use mixed mode IO.
1308     auto file = base::MakeRefCounted<File>(kKeyFileIndex == index);
1309     if (file->Init(backend_->GetFileName(address)))
1310       files_[index].swap(file);
1311   }
1312   return files_[index].get();
1313 }
1314 
1315 // We keep a memory buffer for everything that ends up stored on a block file
1316 // (because we don't know yet the final data size), and for some of the data
1317 // that end up on external files. This function will initialize that memory
1318 // buffer and / or the files needed to store the data.
1319 //
1320 // In general, a buffer may overlap data already stored on disk, and in that
1321 // case, the contents of the buffer are the most accurate. It may also extend
1322 // the file, but we don't want to read from disk just to keep the buffer up to
1323 // date. This means that as soon as there is a chance to get confused about what
1324 // is the most recent version of some part of a file, we'll flush the buffer and
1325 // reuse it for the new data. Keep in mind that the normal use pattern is quite
1326 // simple (write sequentially from the beginning), so we optimize for handling
1327 // that case.
1328 bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
1329                               bool truncate) {
1330   if (truncate)
1331     return HandleTruncation(index, offset, buf_len);
1332 
1333   if (!offset && !buf_len)
1334     return true;
1335 
1336   Addr address(entry_.Data()->data_addr[index]);
1337   if (address.is_initialized()) {
1338     if (address.is_block_file() && !MoveToLocalBuffer(index))
1339       return false;
1340 
1341     if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
1342       // We are about to create a buffer for the first 16KB, make sure that we
1343       // preserve existing data.
1344       if (!CopyToLocalBuffer(index))
1345         return false;
1346     }
1347   }
1348 
1349   if (!user_buffers_[index].get())
1350     user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());
1351 
1352   return PrepareBuffer(index, offset, buf_len);
1353 }
1354 
1355 // We get to this function with some data already stored. If there is a
1356 // truncation that results on data stored internally, we'll explicitly
1357 // handle the case here.
1358 bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
1359   Addr address(entry_.Data()->data_addr[index]);
1360 
1361   int current_size = entry_.Data()->data_size[index];
1362   int new_size = offset + buf_len;
1363 
1364   // This is only called when actually truncating the file, not simply when
1365   // truncate = true is passed to WriteData(), which could be growing the file.
1366   DCHECK_LT(new_size, current_size);
1367 
1368   if (new_size == 0) {
1369     // This is by far the most common scenario.
1370     backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
1371     entry_.Data()->data_addr[index] = 0;
1372     entry_.Data()->data_size[index] = 0;
1373     unreported_size_[index] = 0;
1374     entry_.Store();
1375     DeleteData(address, index);
1376 
1377     user_buffers_[index].reset();
1378     return true;
1379   }
1380 
1381   // We never postpone truncating a file, if there is one, but we may postpone
1382   // telling the backend about the size reduction.
1383   if (user_buffers_[index].get()) {
1384     DCHECK_GE(current_size, user_buffers_[index]->Start());
1385     if (!address.is_initialized()) {
1386       // There is no overlap between the buffer and disk.
1387       if (new_size > user_buffers_[index]->Start()) {
1388         // Truncate our buffer.
1389         DCHECK_LT(new_size, user_buffers_[index]->End());
1390         user_buffers_[index]->Truncate(new_size);
1391 
1392         if (offset < user_buffers_[index]->Start()) {
1393           // Request to write before the current buffer's start, so flush it to
1394           // disk and re-init.
1395           UpdateSize(index, current_size, new_size);
1396           if (!Flush(index, 0))
1397             return false;
1398           return PrepareBuffer(index, offset, buf_len);
1399         } else {
1400           // Can just stick to using the memory buffer.
1401           return true;
1402         }
1403       }
1404 
1405       // Truncated to before the current buffer, so can just discard it.
1406       user_buffers_[index]->Reset();
1407       return PrepareBuffer(index, offset, buf_len);
1408     }
1409 
1410     // There is some overlap or we need to extend the file before the
1411     // truncation.
1412     if (offset > user_buffers_[index]->Start())
1413       user_buffers_[index]->Truncate(new_size);
1414     UpdateSize(index, current_size, new_size);
1415     if (!Flush(index, 0))
1416       return false;
1417     user_buffers_[index].reset();
1418   }
1419 
1420   // We have data somewhere, and it is not in a buffer.
1421   DCHECK(!user_buffers_[index].get());
1422   DCHECK(address.is_initialized());
1423 
1424   if (new_size > kMaxBlockSize)
1425     return true;  // Let the operation go directly to disk.
1426 
1427   return ImportSeparateFile(index, offset + buf_len);
1428 }
1429 
1430 bool EntryImpl::CopyToLocalBuffer(int index) {
1431   Addr address(entry_.Data()->data_addr[index]);
1432   DCHECK(!user_buffers_[index].get());
1433   DCHECK(address.is_initialized());
1434 
1435   int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
1436   user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());
1437   user_buffers_[index]->Write(len, nullptr, 0);
1438 
1439   File* file = GetBackingFile(address, index);
1440   int offset = 0;
1441 
1442   if (address.is_block_file())
1443     offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
1444 
1445   if (!file || !file->Read(user_buffers_[index]->Data(), len, offset, nullptr,
1446                            nullptr)) {
1447     user_buffers_[index].reset();
1448     return false;
1449   }
1450   return true;
1451 }
1452 
1453 bool EntryImpl::MoveToLocalBuffer(int index) {
1454   if (!CopyToLocalBuffer(index))
1455     return false;
1456 
1457   Addr address(entry_.Data()->data_addr[index]);
1458   entry_.Data()->data_addr[index] = 0;
1459   entry_.Store();
1460   DeleteData(address, index);
1461 
1462   // If we lose this entry we'll see it as zero sized.
1463   int len = entry_.Data()->data_size[index];
1464   backend_->ModifyStorageSize(len - unreported_size_[index], 0);
1465   unreported_size_[index] = len;
1466   return true;
1467 }
1468 
1469 bool EntryImpl::ImportSeparateFile(int index, int new_size) {
1470   if (entry_.Data()->data_size[index] > new_size)
1471     UpdateSize(index, entry_.Data()->data_size[index], new_size);
1472 
1473   return MoveToLocalBuffer(index);
1474 }
1475 
1476 bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
1477   DCHECK(user_buffers_[index].get());
1478   if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
1479       offset > entry_.Data()->data_size[index]) {
1480     // We are about to extend the buffer or the file (with zeros), so make sure
1481     // that we are not overwriting anything.
1482     Addr address(entry_.Data()->data_addr[index]);
1483     if (address.is_initialized() && address.is_separate_file()) {
1484       if (!Flush(index, 0))
1485         return false;
1486       // There is an actual file already, and we don't want to keep track of
1487       // its length so we let this operation go straight to disk.
1488       // The only case when a buffer is allowed to extend the file (as in fill
1489       // with zeros before the start) is when there is no file yet to extend.
1490       user_buffers_[index].reset();
1491       return true;
1492     }
1493   }
1494 
1495   if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
1496     if (!Flush(index, offset + buf_len))
1497       return false;
1498 
1499     // Let's try again.
1500     if (offset > user_buffers_[index]->End() ||
1501         !user_buffers_[index]->PreWrite(offset, buf_len)) {
1502       // We cannot complete the operation with a buffer.
1503       DCHECK(!user_buffers_[index]->Size());
1504       DCHECK(!user_buffers_[index]->Start());
1505       user_buffers_[index].reset();
1506     }
1507   }
1508   return true;
1509 }
1510 
1511 bool EntryImpl::Flush(int index, int min_len) {
1512   Addr address(entry_.Data()->data_addr[index]);
1513   DCHECK(user_buffers_[index].get());
1514   DCHECK(!address.is_initialized() || address.is_separate_file());
1515   DVLOG(3) << "Flush";
1516 
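  // The backing block or file is allocated lazily, now that at least part of
  // the final stream size is known.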
1517   int size = std::max(entry_.Data()->data_size[index], min_len);
1518   if (size && !address.is_initialized() && !CreateDataBlock(index, size))
1519     return false;
1520 
1521   if (!entry_.Data()->data_size[index]) {
1522     DCHECK(!user_buffers_[index]->Size());
1523     return true;
1524   }
1525 
1526   address.set_value(entry_.Data()->data_addr[index]);
1527 
1528   int len = user_buffers_[index]->Size();
1529   int offset = user_buffers_[index]->Start();
1530   if (!len && !offset)
1531     return true;
1532 
1533   if (address.is_block_file()) {
1534     DCHECK_EQ(len, entry_.Data()->data_size[index]);
1535     DCHECK(!offset);
1536     offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
1537   }
1538 
1539   File* file = GetBackingFile(address, index);
1540   if (!file)
1541     return false;
1542 
1543   if (!file->Write(user_buffers_[index]->Data(), len, offset, nullptr, nullptr))
1544     return false;
1545   user_buffers_[index]->Reset();
1546 
1547   return true;
1548 }
1549 
1550 void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
1551   if (entry_.Data()->data_size[index] == new_size)
1552     return;
1553 
1554   unreported_size_[index] += new_size - old_size;
1555   entry_.Data()->data_size[index] = new_size;
1556   entry_.set_modified();
1557 }
1558 
1559 int EntryImpl::InitSparseData() {
1560   if (sparse_.get())
1561     return net::OK;
1562 
1563   // Use a local variable so that sparse_ never goes from 'valid' to NULL.
1564   auto sparse = std::make_unique<SparseControl>(this);
1565   int result = sparse->Init();
1566   if (net::OK == result)
1567     sparse_.swap(sparse);
1568 
1569   return result;
1570 }
1571 
1572 void EntryImpl::SetEntryFlags(uint32_t flags) {
1573   entry_.Data()->flags |= flags;
1574   entry_.set_modified();
1575 }
1576 
1577 uint32_t EntryImpl::GetEntryFlags() {
1578   return entry_.Data()->flags;
1579 }
1580 
1581 void EntryImpl::GetData(int index,
1582                         std::unique_ptr<char[]>* buffer,
1583                         Addr* address) {
1584   DCHECK(backend_.get());
1585   if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
1586       !user_buffers_[index]->Start()) {
1587     // The data is already in memory, just copy it and we're done.
1588     int data_len = entry_.Data()->data_size[index];
1589     if (data_len <= user_buffers_[index]->Size()) {
1590       DCHECK(!user_buffers_[index]->Start());
1591       *buffer = std::make_unique<char[]>(data_len);
1592       memcpy(buffer->get(), user_buffers_[index]->Data(), data_len);
1593       return;
1594     }
1595   }
1596 
1597   // Bad news: we'd have to read the info from disk so instead we'll just tell
1598   // the caller where to read from.
1599   *buffer = nullptr;
1600   address->set_value(entry_.Data()->data_addr[index]);
1601   if (address->is_initialized()) {
1602     // Prevent us from deleting the block from the backing store.
1603     backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
1604                                     unreported_size_[index], 0);
1605     entry_.Data()->data_addr[index] = 0;
1606     entry_.Data()->data_size[index] = 0;
1607   }
1608 }
1609 
1610 }  // namespace disk_cache
1611 
1612 #undef CACHE_UMA_BACKEND_IMPL_OBJ  // undef for jumbo builds
1613