// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include "net/disk_cache/blockfile/entry_impl.h"

#include <limits>
#include <memory>

#include "base/files/file_util.h"
#include "base/hash/hash.h"
#include "base/numerics/safe_math.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/sparse_control.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/log/net_log.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"

using base::Time;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the completion callback of a
// file IO operation, keeping the actual net callback insulated from the file
// layer.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion.  Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(scoped_refptr<disk_cache::EntryImpl> entry,
               net::IOBuffer* buffer,
               net::CompletionOnceCallback callback,
               net::NetLogEventType end_event_type)
      : entry_(std::move(entry)),
        callback_(std::move(callback)),
        buf_(buffer),
        end_event_type_(end_event_type) {
    entry_->IncrementIoCount();
  }

  SyncCallback(const SyncCallback&) = delete;
  SyncCallback& operator=(const SyncCallback&) = delete;

  ~SyncCallback() override = default;

  void OnFileIOComplete(int bytes_copied) override;
  void Discard();

 private:
  scoped_refptr<disk_cache::EntryImpl> entry_;
  net::CompletionOnceCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  const net::NetLogEventType end_event_type_;
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsCapturing()) {
      disk_cache::NetLogReadWriteComplete(entry_->net_log(), end_event_type_,
                                          net::NetLogEventPhase::END,
                                          bytes_copied);
    }
    buf_ = nullptr;  // Release the buffer before invoking the callback.
    std::move(callback_).Run(bytes_copied);
  }
  delete this;
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = nullptr;
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset
// to zero. The buffer grows up to a size determined by the backend, to keep
// the total memory used under control.
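//
// Illustrative usage (added commentary, not from the original source;
// |io_buf| is a hypothetical IOBuffer and |backend| a BackendImpl*):
//
//   UserBuffer buffer(backend);            // empty buffer, offset_ == 0
//   if (buffer.PreWrite(0, 100))
//     buffer.Write(0, io_buf.get(), 100);  // buffered; offset_ stays 0
//
// A first write at an offset beyond kMaxBlockSize instead anchors the buffer
// there (offset_ == offset), so the earlier part of the stream is never
// buffered.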
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend) : backend_(backend->GetWeakPtr()) {
    buffer_.reserve(kMaxBlockSize);
  }

  UserBuffer(const UserBuffer&) = delete;
  UserBuffer& operator=(const UserBuffer&) = delete;

  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
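  //
  // Illustrative example (added commentary): with offset_ == 0, Size() == 100
  // and eof == 500, PreRead(500, 200, &len) returns false and leaves |len|
  // untouched (the caller should read from disk), while PreRead(500, 50, &len)
  // returns true because the requested range starts inside the buffer.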
  bool PreRead(int eof, int offset, int* len);

  // Reads up to |len| bytes into |buf| from the given |offset|. Returns the
  // number of bytes copied.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepare this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.data(); }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_ = 0;
  std::vector<char> buffer_;
  bool grow_allowed_ = true;
};

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

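  // (Added commentary.) The limit of kMaxBufferSize * 6 / 5 below gives a
  // write that extends an existing buffer about 20% of slack over the nominal
  // 1 MB cap before it has to go through a flush to disk.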
  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // 0-length writes that don't extend can just be ignored here, and are safe
  // even if they are before offset_, as truncates are handled elsewhere.
  if (len == 0 && offset < End())
    return;

  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file, so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

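  // (Added commentary.) Growth is amortized: grow by at least kMaxBlockSize *
  // 4 bytes and at least double the current capacity, then cap the result at
  // |limit|.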
  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(nullptr, Addr(0)),
      node_(nullptr, Addr(0)),
      backend_(backend->GetWeakPtr()),
      read_only_(read_only) {
  entry_.LazyInit(backend->File(address), address);
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        false);
  }

  int result =
      InternalReadData(index, offset, buf, buf_len, std::move(callback));

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        truncate);
  }

  int result = InternalWriteData(index, offset, buf, buf_len,
                                 std::move(callback), truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            std::move(callback));
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, std::move(callback));
  return result;
}

RangeResult EntryImpl::GetAvailableRangeImpl(int64_t offset, int len) {
  int result = InitSparseData();
  if (net::OK != result)
    return RangeResult(static_cast<net::Error>(result));

  return sparse_->GetAvailableRange(offset, len);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(CompletionOnceCallback callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(std::move(callback));
}

uint32_t EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address,
                            const std::string& key,
                            uint32_t hash) {
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32_t>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size() + 1, offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  return true;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32_t hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32_t current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32_t new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set at entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::PersistentHash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::NetLogWithSource::Make(
      net_log, net::NetLogSourceType::DISK_CACHE_ENTRY);
  net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL, [&] {
    return CreateNetLogParametersEntryCreationParams(this, created);
  });
}

const net::NetLogWithSource& EntryImpl::net_log() const {
  return net_log_;
}

// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

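  // (Added commentary.) Each entry block is 256 bytes; the first block holds
  // key1_len bytes of key and every extra block holds 256 more, with one byte
  // reserved for the trailing '\0'. For example, if key1_len were 160, a
  // 300-byte key would need (300 - 160) / 256 + 2 == 2 blocks: 160 bytes in
  // the first block and the remaining 140 plus the '\0' in the second.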
  return ((key_size - key1_len) / 256 + 2);
}

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key, key_len);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  // We store a trailing \0 on disk.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len + 1)) {
    return std::string();
  }

  // Do not attempt to read up to the expected on-disk '\0' (that is,
  // |key_len + 1| bytes total): if the file is corrupt and the '\0' is
  // missing, |key_| would get its internal nul messed up.
  key_.resize(key_len);
  if (!key_file->Read(key_.data(), key_.size(), offset)) {
    key_.clear();
  }
  DCHECK_LE(strlen(key_.data()), static_cast<size_t>(key_len));
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32_t EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index,
                        int offset,
                        IOBuffer* buf,
                        int buf_len,
                        CompletionOnceCallback callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, std::move(callback));

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len,
                              std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index,
                         int offset,
                         IOBuffer* buf,
                         int buf_len,
                         CompletionOnceCallback callback,
                         bool truncate) {
  if (callback.is_null()) {
    return WriteDataImpl(index, offset, buf, buf_len, std::move(callback),
                         truncate);
  }

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64_t offset,
                              IOBuffer* buf,
                              int buf_len,
                              CompletionOnceCallback callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, std::move(callback));

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len,
                                    std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64_t offset,
                               IOBuffer* buf,
                               int buf_len,
                               CompletionOnceCallback callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, std::move(callback));

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len,
                                     std::move(callback));
  return net::ERR_IO_PENDING;
}

RangeResult EntryImpl::GetAvailableRange(int64_t offset,
                                         int len,
                                         RangeResultCallback callback) {
  if (!background_queue_.get())
    return RangeResult(net::ERR_UNEXPECTED);

  background_queue_->GetAvailableRange(this, offset, len, std::move(callback));
  return RangeResult(net::ERR_IO_PENDING);
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  auto sparse = std::make_unique<SparseControl>(const_cast<EntryImpl*>(this));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

net::Error EntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, std::move(callback));
  return net::ERR_IO_PENDING;
}

void EntryImpl::SetLastUsedTimeForTest(base::Time time) {
  SetTimes(time, time);
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify partial reads and writes
// of an entry (we don't have to worry about returning data related to a
// previous cache entry just because a range was not fully written before).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLogEventType::ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        ret = Flush(index, 0);
        if (!ret)
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index,
                                int offset,
                                IOBuffer* buf,
                                int buf_len,
                                CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = nullptr;
  bool null_callback = callback.is_null();
  if (!null_callback) {
    io_callback =
        new SyncCallback(base::WrapRefCounted(this), buf, std::move(callback),
                         net::NetLogEventType::ENTRY_READ_DATA);
  }

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index,
                                 int offset,
                                 IOBuffer* buf,
                                 int buf_len,
                                 CompletionOnceCallback callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  int end_offset;
  if (offset > max_file_size || buf_len > max_file_size ||
      !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > max_file_size) {
    int size = base::CheckAdd(offset, buf_len)
                   .ValueOrDefault(std::numeric_limits<int32_t>::max());
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = nullptr;
  bool null_callback = callback.is_null();
  if (!null_callback) {
    io_callback = new SyncCallback(this, buf, std::move(callback),
                                   net::NetLogEventType::ENTRY_WRITE_DATA);
  }

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !base::DeleteFile(backend_->GetFileName(address));
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = nullptr;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return nullptr;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    auto file = base::MakeRefCounted<File>(kKeyFileIndex == index);
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
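//
// Illustrative example (added commentary): sequential writes from offset 0
// simply accumulate in the user buffer until it is flushed, while a write
// beyond the buffered range of a stream that already has a separate backing
// file makes PrepareBuffer() flush the buffer and drop it, so the new write
// goes straight to disk.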
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  // This is only called when actually truncating the file, not simply when
  // truncate = true is passed to WriteData(), which could be growing the file.
  DCHECK_LT(new_size, current_size);

  if (new_size == 0) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);

        if (offset < user_buffers_[index]->Start()) {
          // Request to write before the current buffer's start, so flush it to
          // disk and re-init.
          UpdateSize(index, current_size, new_size);
          if (!Flush(index, 0))
            return false;
          return PrepareBuffer(index, offset, buf_len);
        } else {
          // Can just stick to using the memory buffer.
          return true;
        }
      }

      // Truncated to before the current buffer, so can just discard it.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());
  user_buffers_[index]->Write(len, nullptr, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file || !file->Read(user_buffers_[index]->Data(), len, offset, nullptr,
                           nullptr)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, nullptr, nullptr))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  auto sparse = std::make_unique<SparseControl>(this);
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32_t flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32_t EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index,
                        std::unique_ptr<char[]>* buffer,
                        Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = std::make_unique<char[]>(data_len);
      memcpy(buffer->get(), user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = nullptr;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

}  // namespace disk_cache