// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/entry_impl.h"

#include <limits>
#include <memory>

#include "base/files/file_util.h"
#include "base/hash/hash.h"
#include "base/numerics/safe_math.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/sparse_control.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/log/net_log.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"

using base::Time;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the callback from a file IO
// operation from the actual net class.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion.  Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(scoped_refptr<disk_cache::EntryImpl> entry,
               net::IOBuffer* buffer,
               net::CompletionOnceCallback callback,
               net::NetLogEventType end_event_type)
      : entry_(std::move(entry)),
        callback_(std::move(callback)),
        buf_(buffer),
        end_event_type_(end_event_type) {
    entry_->IncrementIoCount();
  }

  SyncCallback(const SyncCallback&) = delete;
  SyncCallback& operator=(const SyncCallback&) = delete;

  ~SyncCallback() override = default;

  void OnFileIOComplete(int bytes_copied) override;
  void Discard();

 private:
  scoped_refptr<disk_cache::EntryImpl> entry_;
  net::CompletionOnceCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  const net::NetLogEventType end_event_type_;
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsCapturing()) {
      disk_cache::NetLogReadWriteComplete(entry_->net_log(), end_event_type_,
                                          net::NetLogEventPhase::END,
                                          bytes_copied);
    }
    buf_ = nullptr;  // Release the buffer before invoking the callback.
    std::move(callback_).Run(bytes_copied);
  }
  delete this;
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = nullptr;
  OnFileIOComplete(0);
}
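
// A minimal usage sketch (illustrative only, not part of the cache code):
// callers allocate a SyncCallback on the heap, hand it to an asynchronous
// File operation, and Discard() it if the operation already completed,
// mirroring what EntryImpl::InternalReadData() does later in this file.
//
//   auto* io_callback = new SyncCallback(
//       base::WrapRefCounted(entry), buf, std::move(callback),
//       net::NetLogEventType::ENTRY_READ_DATA);
//   bool completed;
//   if (!file->Read(buf->data(), buf_len, offset, io_callback, &completed) ||
//       completed)
//     io_callback->Discard();  // The object deletes itself.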

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset
// to zero. The buffer grows up to a size determined by the backend, to keep
// the total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend) : backend_(backend->GetWeakPtr()) {
    buffer_.reserve(kMaxBlockSize);
  }

  UserBuffer(const UserBuffer&) = delete;
  UserBuffer& operator=(const UserBuffer&) = delete;

  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes at the given |offset| into |buf|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepare this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.data(); }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_ = 0;
  std::vector<char> buffer_;
  bool grow_allowed_ = true;
};
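
// A minimal sketch of the intended call sequence (illustrative only; the
// real callers are the EntryImpl methods below): check PreWrite() before
// Write(), and fall back to disk IO when the buffer refuses to grow.
//
//   EntryImpl::UserBuffer buffer(backend);  // |backend| assumed valid.
//   if (buffer.PreWrite(offset, len))
//     buffer.Write(offset, io_buf.get(), len);
//   else
//     /* flush and write through to the backing file instead */;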

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // 0-length writes that don't extend can just be ignored here, and are safe
  // even if they are before offset_, as truncates are handled elsewhere.
  if (len == 0 && offset < End())
    return;

  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}
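
// An illustrative PreRead() walk-through (not part of the original source):
// with offset_ == 1024, Size() == 512 and a file storing eof == 1024 bytes,
// a caller asking for 600 bytes at offset 900 gets |len| trimmed to 124
// bytes (the gap before the buffer) and a false return, meaning "read those
// bytes from disk, then come back for the buffered part".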

int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(nullptr, Addr(0)),
      node_(nullptr, Addr(0)),
      backend_(backend->GetWeakPtr()),
      read_only_(read_only) {
  entry_.LazyInit(backend->File(address), address);
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        false);
  }

  int result =
      InternalReadData(index, offset, buf, buf_len, std::move(callback));

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        truncate);
  }

  int result = InternalWriteData(index, offset, buf, buf_len,
                                 std::move(callback), truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            std::move(callback));
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, std::move(callback));
  return result;
}

RangeResult EntryImpl::GetAvailableRangeImpl(int64_t offset, int len) {
  int result = InitSparseData();
  if (net::OK != result)
    return RangeResult(static_cast<net::Error>(result));

  return sparse_->GetAvailableRange(offset, len);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(CompletionOnceCallback callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(std::move(callback));
}

uint32_t EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address,
                            const std::string& key,
                            uint32_t hash) {
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32_t>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size() + 1, offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  return true;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32_t hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32_t current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32_t new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should have been set at entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::PersistentHash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::NetLogWithSource::Make(
      net_log, net::NetLogSourceType::DISK_CACHE_ENTRY);
  net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL, [&] {
    return CreateNetLogParametersEntryCreationParams(this, created);
  });
}

const net::NetLogWithSource& EntryImpl::net_log() const {
  return net_log_;
}

// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

  return ((key_size - key1_len) / 256 + 2);
}
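
// A worked example with hypothetical sizes (the real constants come from
// disk_format.h): if sizeof(EntryStore) were 256 bytes and the key field
// started at offset 96, key1_len would be 160, so a 500-byte key would take
// (500 - 160) / 256 + 2 = 3 blocks, while any key under 160 bytes fits in 1.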

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key, key_len);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  // WriteInto will ensure that key_.length() == key_len - 1, and so
  // key_.c_str()[key_len - 1] will be '\0'. Taking advantage of this, do not
  // attempt to read up to the expected on-disk '\0' --- which would be
  // |key_len| bytes total --- because if, due to a corrupt file, it isn't
  // there, |key_| would get its internal nul messed up.
  if (!key_file->Read(base::WriteInto(&key_, key_len), key_len - 1, offset))
    key_.clear();
  DCHECK_LE(strlen(key_.data()), static_cast<size_t>(key_len));
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32_t EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index,
                        int offset,
                        IOBuffer* buf,
                        int buf_len,
                        CompletionOnceCallback callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, std::move(callback));

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len,
                              std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index,
                         int offset,
                         IOBuffer* buf,
                         int buf_len,
                         CompletionOnceCallback callback,
                         bool truncate) {
  if (callback.is_null()) {
    return WriteDataImpl(index, offset, buf, buf_len, std::move(callback),
                         truncate);
  }

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64_t offset,
                              IOBuffer* buf,
                              int buf_len,
                              CompletionOnceCallback callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, std::move(callback));

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len,
                                    std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64_t offset,
                               IOBuffer* buf,
                               int buf_len,
                               CompletionOnceCallback callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, std::move(callback));

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len,
                                     std::move(callback));
  return net::ERR_IO_PENDING;
}

RangeResult EntryImpl::GetAvailableRange(int64_t offset,
                                         int len,
                                         RangeResultCallback callback) {
  if (!background_queue_.get())
    return RangeResult(net::ERR_UNEXPECTED);

  background_queue_->GetAvailableRange(this, offset, len, std::move(callback));
  return RangeResult(net::ERR_IO_PENDING);
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  auto sparse = std::make_unique<SparseControl>(const_cast<EntryImpl*>(this));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

net::Error EntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, std::move(callback));
  return net::ERR_IO_PENDING;
}

void EntryImpl::SetLastUsedTimeForTest(base::Time time) {
  SetTimes(time, time);
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify reading and writing
// partial information from an entry (we don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLogEventType::ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        ret = Flush(index, 0);
        if (!ret)
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index,
                                int offset,
                                IOBuffer* buf,
                                int buf_len,
                                CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = nullptr;
  bool null_callback = callback.is_null();
  if (!null_callback) {
    io_callback =
        new SyncCallback(base::WrapRefCounted(this), buf, std::move(callback),
                         net::NetLogEventType::ENTRY_READ_DATA);
  }

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index,
                                 int offset,
                                 IOBuffer* buf,
                                 int buf_len,
                                 CompletionOnceCallback callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  int end_offset;
  if (offset > max_file_size || buf_len > max_file_size ||
      !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > max_file_size) {
    int size = base::CheckAdd(offset, buf_len)
                   .ValueOrDefault(std::numeric_limits<int32_t>::max());
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = nullptr;
  bool null_callback = callback.is_null();
  if (!null_callback) {
    io_callback = new SyncCallback(this, buf, std::move(callback),
                                   net::NetLogEventType::ENTRY_WRITE_DATA);
  }

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !base::DeleteFile(backend_->GetFileName(address));
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = nullptr;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return nullptr;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    auto file = base::MakeRefCounted<File>(kKeyFileIndex == index);
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());

  return PrepareBuffer(index, offset, buf_len);
}
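
// A sketch of the common path described above (illustrative only; |entry| is
// assumed to be an open EntryImpl and |buf| a filled 1 KB IOBuffer). The
// sequential writes below land in the UserBuffer and only reach disk when the
// entry is flushed or destroyed:
//
//   entry->WriteDataImpl(0, 0, buf.get(), 1024, std::move(cb1), false);
//   entry->WriteDataImpl(0, 1024, buf.get(), 1024, std::move(cb2), false);
//   // ~EntryImpl() later calls Flush(), which persists the buffered bytes.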

// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  // This is only called when actually truncating the file, not simply when
  // truncate = true is passed to WriteData(), which could be growing the file.
  DCHECK_LT(new_size, current_size);

  if (new_size == 0) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);

        if (offset < user_buffers_[index]->Start()) {
          // Request to write before the current buffer's start, so flush it to
          // disk and re-init.
          UpdateSize(index, current_size, new_size);
          if (!Flush(index, 0))
            return false;
          return PrepareBuffer(index, offset, buf_len);
        } else {
          // Can just stick to using the memory buffer.
          return true;
        }
      }

      // Truncated to before the current buffer, so can just discard it.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());
  user_buffers_[index]->Write(len, nullptr, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file || !file->Read(user_buffers_[index]->Data(), len, offset, nullptr,
                           nullptr)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, nullptr, nullptr))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  auto sparse = std::make_unique<SparseControl>(this);
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32_t flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32_t EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index,
                        std::unique_ptr<char[]>* buffer,
                        Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = std::make_unique<char[]>(data_len);
      memcpy(buffer->get(), user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = nullptr;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

}  // namespace disk_cache