// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include "net/disk_cache/memory/mem_entry_impl.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "base/check_op.h"
#include "base/format_macros.h"
#include "base/functional/bind.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_math.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "net/base/interval.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"

using base::Time;

namespace disk_cache {

namespace {

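// Index of the data stream used to store sparse data, both in the parent
// entry and in its children.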
const int kSparseData = 1;

// The maximum size of a child of a sparse entry is 2 to the power of this
// number.
const int kMaxChildEntryBits = 12;

// Sparse entry children have a maximum size of 4 KiB.
const int kMaxChildEntrySize = 1 << kMaxChildEntryBits;

// Convert global offset to child index.
int64_t ToChildIndex(int64_t offset) {
  return offset >> kMaxChildEntryBits;
}

// Convert global offset to offset in child entry.
int ToChildOffset(int64_t offset) {
  return static_cast<int>(offset & (kMaxChildEntrySize - 1));
}
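// For example, with kMaxChildEntryBits == 12, a global offset of 5000 maps to
// child index 5000 >> 12 == 1 and child offset 5000 & 4095 == 904, i.e. byte
// 904 of the second 4 KiB child.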

// Returns a name for a child entry given the base_name of the parent and the
// child_id.  This name is only used for logging purposes.
// If the entry is called entry_name, child entries will be named something
// like Range_entry_name:YYY where YYY is the number of the particular child.
std::string GenerateChildName(const std::string& base_name, int64_t child_id) {
  return base::StringPrintf("Range_%s:%" PRId64, base_name.c_str(), child_id);
}

// Returns NetLog parameters for the creation of a MemEntryImpl. A separate
// function is needed because child entries don't store their key().
base::Value::Dict NetLogEntryCreationParams(const MemEntryImpl* entry) {
  base::Value::Dict dict;
  std::string key;
  switch (entry->type()) {
    case MemEntryImpl::EntryType::kParent:
      key = entry->key();
      break;
    case MemEntryImpl::EntryType::kChild:
      key = GenerateChildName(entry->parent()->key(), entry->child_id());
      break;
  }
  dict.Set("key", key);
  dict.Set("created", true);
  return dict;
}

}  // namespace

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           const std::string& key,
                           net::NetLog* net_log)
    : MemEntryImpl(backend,
                   key,
                   0,        // child_id
                   nullptr,  // parent
                   net_log) {
  Open();
  // Just creating the entry (without any data) could cause the storage to
  // grow beyond capacity, but we allow such infractions.
  backend_->ModifyStorageSize(GetStorageSize());
}

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           int64_t child_id,
                           MemEntryImpl* parent,
                           net::NetLog* net_log)
    : MemEntryImpl(backend,
                   std::string(),  // key
                   child_id,
                   parent,
                   net_log) {
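  // Register the new child in the parent's map of children.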
  (*parent_->children_)[child_id] = this;
}

void MemEntryImpl::Open() {
  // Only a parent entry can be opened.
  DCHECK_EQ(EntryType::kParent, type());
  CHECK_NE(ref_count_, std::numeric_limits<uint32_t>::max());
  ++ref_count_;
  DCHECK(!doomed_);
}

bool MemEntryImpl::InUse() const {
  if (type() == EntryType::kChild)
    return parent_->InUse();

  return ref_count_ > 0;
}

int MemEntryImpl::GetStorageSize() const {
  int storage_size = static_cast<int32_t>(key_.size());
  for (const auto& i : data_)
    storage_size += i.size();
  return storage_size;
}

void MemEntryImpl::UpdateStateOnUse(EntryModified modified_enum) {
  if (!doomed_ && backend_)
    backend_->OnEntryUpdated(this);

  last_used_ = MemBackendImpl::Now(backend_);
  if (modified_enum == ENTRY_WAS_MODIFIED)
    last_modified_ = last_used_;
}

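// Dooming removes the entry from the backend's view; the object itself is
// deleted right away if no references remain, and otherwise on the final
// Close().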
void MemEntryImpl::Doom() {
  if (!doomed_) {
    doomed_ = true;
    if (backend_)
      backend_->OnEntryDoomed(this);
    net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
  }
  if (!ref_count_)
    delete this;
}

void MemEntryImpl::Close() {
  DCHECK_EQ(EntryType::kParent, type());
  CHECK_GT(ref_count_, 0u);
  --ref_count_;
  if (ref_count_ == 0 && !doomed_) {
    // At this point the user is clearly done writing, so make sure there isn't
    // wastage due to exponential growth of the vector for the main data
    // stream.
    Compact();
    if (children_) {
      for (const auto& child_info : *children_) {
        if (child_info.second != this)
          child_info.second->Compact();
      }
    }
  }
  if (!ref_count_ && doomed_)
    delete this;
}

std::string MemEntryImpl::GetKey() const {
  // A child entry doesn't have a key, so this method should not be called.
  DCHECK_EQ(EntryType::kParent, type());
  return key_;
}

Time MemEntryImpl::GetLastUsed() const {
  return last_used_;
}

Time MemEntryImpl::GetLastModified() const {
  return last_modified_;
}

int32_t MemEntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;
  return data_[index].size();
}

int MemEntryImpl::ReadData(int index,
                           int offset,
                           IOBuffer* buf,
                           int buf_len,
                           CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        false);
  }

  int result = InternalReadData(index, offset, buf, buf_len);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int MemEntryImpl::WriteData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback,
                            bool truncate) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        truncate);
  }

  int result = InternalWriteData(index, offset, buf, buf_len, truncate);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                            net::NetLogEventPhase::END, result);
  }

  return result;
}

int MemEntryImpl::ReadSparseData(int64_t offset,
                                 IOBuffer* buf,
                                 int buf_len,
                                 CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_READ,
                          net::NetLogEventPhase::BEGIN, offset, buf_len);
  }
  int result = InternalReadSparseData(offset, buf, buf_len);
  if (net_log_.IsCapturing())
    net_log_.EndEvent(net::NetLogEventType::SPARSE_READ);
  return result;
}

int MemEntryImpl::WriteSparseData(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_WRITE,
                          net::NetLogEventPhase::BEGIN, offset, buf_len);
  }
  int result = InternalWriteSparseData(offset, buf, buf_len);
  if (net_log_.IsCapturing())
    net_log_.EndEvent(net::NetLogEventType::SPARSE_WRITE);
  return result;
}

RangeResult MemEntryImpl::GetAvailableRange(int64_t offset,
                                            int len,
                                            RangeResultCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_GET_RANGE,
                          net::NetLogEventPhase::BEGIN, offset, len);
  }
  RangeResult result = InternalGetAvailableRange(offset, len);
  if (net_log_.IsCapturing()) {
    net_log_.EndEvent(net::NetLogEventType::SPARSE_GET_RANGE, [&] {
      return CreateNetLogGetAvailableRangeResultParams(result);
    });
  }
  return result;
}

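// An entry can support sparse I/O once InitSparseInfo() has created the child
// map, so its presence is what is checked here.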
bool MemEntryImpl::CouldBeSparse() const {
  DCHECK_EQ(EntryType::kParent, type());
  return (children_.get() != nullptr);
}

net::Error MemEntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
  return net::OK;
}

void MemEntryImpl::SetLastUsedTimeForTest(base::Time time) {
  last_used_ = time;
}

// ------------------------------------------------------------------------

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           const ::std::string& key,
                           int64_t child_id,
                           MemEntryImpl* parent,
                           net::NetLog* net_log)
    : key_(key),
      child_id_(child_id),
      parent_(parent),
      last_modified_(MemBackendImpl::Now(backend)),
      last_used_(last_modified_),
      backend_(backend) {
  backend_->OnEntryInserted(this);
  net_log_ = net::NetLogWithSource::Make(
      net_log, net::NetLogSourceType::MEMORY_CACHE_ENTRY);
  net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_MEM_ENTRY_IMPL,
                      [&] { return NetLogEntryCreationParams(this); });
}

MemEntryImpl::~MemEntryImpl() {
  if (backend_)
    backend_->ModifyStorageSize(-GetStorageSize());

  if (type() == EntryType::kParent) {
    if (children_) {
      EntryMap children;
      children_->swap(children);

      for (auto& it : children) {
        // Since |this| is stored in the map, it should be guarded against
        // double dooming, which will result in double destruction.
        if (it.second != this)
          it.second->Doom();
      }
    }
  } else {
    parent_->children_->erase(child_id_);
  }
  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_MEM_ENTRY_IMPL);
}

int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
                                   int buf_len) {
  DCHECK(type() == EntryType::kParent || index == kSparseData);

  if (index < 0 || index >= kNumStreams || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = data_[index].size();
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > entry_size)
    buf_len = entry_size - offset;

  UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
  std::copy(data_[index].begin() + offset,
            data_[index].begin() + offset + buf_len, buf->data());
  return buf_len;
}

int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
                                    int buf_len, bool truncate) {
  DCHECK(type() == EntryType::kParent || index == kSparseData);
  if (!backend_)
    return net::ERR_INSUFFICIENT_RESOURCES;

  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  const int max_file_size = backend_->MaxFileSize();

  int end_offset;
  if (offset > max_file_size || buf_len > max_file_size ||
      !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > max_file_size) {
    return net::ERR_FAILED;
  }

  std::vector<char>& data = data_[index];
  const int old_data_size = base::checked_cast<int>(data.size());

  // Overwrite any data that fits inside the existing file.
  if (offset < old_data_size && buf_len > 0) {
    const int bytes_to_copy = std::min(old_data_size - offset, buf_len);
    std::copy(buf->data(), buf->data() + bytes_to_copy, data.begin() + offset);
  }

  const int delta = end_offset - old_data_size;
  if (truncate && delta < 0) {
    // We permit reducing the size even if the storage size has been exceeded,
    // since it can only improve the situation. See https://crbug.com/331839344.
    backend_->ModifyStorageSize(delta);
    data.resize(end_offset);
  } else if (delta > 0) {
    backend_->ModifyStorageSize(delta);
    if (backend_->HasExceededStorageSize()) {
      backend_->ModifyStorageSize(-delta);
      return net::ERR_INSUFFICIENT_RESOURCES;
    }

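    // For example, if the stream currently holds 10 bytes and we write 5
    // bytes at offset 20, the resize() below zero-fills bytes [10, 20) and
    // the insert() appends bytes [20, 25).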
    // Zero fill any hole.
    int current_size = old_data_size;
    if (current_size < offset) {
      data.resize(offset);
      current_size = offset;
    }
    // Append any data after the old end of the file.
    if (end_offset > current_size) {
      data.insert(data.end(), buf->data() + current_size - offset,
                  buf->data() + buf_len);
    }
  }

  UpdateStateOnUse(ENTRY_WAS_MODIFIED);

  return buf_len;
}

int MemEntryImpl::InternalReadSparseData(int64_t offset,
                                         IOBuffer* buf,
                                         int buf_len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // Ensure that offset + buf_len does not overflow. This ensures that
  // offset + io_buf->BytesConsumed() never overflows below.
  // The result of std::min is guaranteed to fit into int since buf_len did.
  buf_len = std::min(static_cast<int64_t>(buf_len),
                     std::numeric_limits<int64_t>::max() - offset);

  // We will keep using this buffer and adjust the offset in this buffer.
  scoped_refptr<net::DrainableIOBuffer> io_buf =
      base::MakeRefCounted<net::DrainableIOBuffer>(buf, buf_len);

  // Iterate until we have read enough.
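  // Each pass of the loop reads from at most one child. For example, a
  // 6000-byte read at offset 0 reads up to 4096 bytes from child 0 and the
  // remainder from child 1, stopping early at the first gap it encounters.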
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), false);

    // No child present for that offset.
    if (!child)
      break;

    // We then need to prepare the child offset and len.
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // If we are trying to read from a position where the child entry has no
    // data, we should stop.
    if (child_offset < child->child_first_pos_)
      break;
    if (net_log_.IsCapturing()) {
      NetLogSparseReadWrite(net_log_,
                            net::NetLogEventType::SPARSE_READ_CHILD_DATA,
                            net::NetLogEventPhase::BEGIN,
                            child->net_log_.source(), io_buf->BytesRemaining());
    }
    int ret =
        child->ReadData(kSparseData, child_offset, io_buf.get(),
                        io_buf->BytesRemaining(), CompletionOnceCallback());
    if (net_log_.IsCapturing()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLogEventType::SPARSE_READ_CHILD_DATA, ret);
    }

    // If we encounter an error in one entry, return immediately.
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Increment the counter by number of bytes read in the child entry.
    io_buf->DidConsume(ret);
  }

  UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
  return io_buf->BytesConsumed();
}

int MemEntryImpl::InternalWriteSparseData(int64_t offset,
                                          IOBuffer* buf,
                                          int buf_len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // We can't generally do this without the backend since we need it to create
  // child entries.
  if (!backend_)
    return net::ERR_FAILED;

  // Check that offset + buf_len does not overflow. This ensures that
  // offset + io_buf->BytesConsumed() never overflows below.
  if (offset < 0 || buf_len < 0 || !base::CheckAdd(offset, buf_len).IsValid())
    return net::ERR_INVALID_ARGUMENT;

  scoped_refptr<net::DrainableIOBuffer> io_buf =
      base::MakeRefCounted<net::DrainableIOBuffer>(buf, buf_len);

  // This loop walks through child entries continuously starting from |offset|
  // and writes blocks of data (of maximum size kMaxChildEntrySize) into each
  // child entry until all |buf_len| bytes are written. The write operation can
  // start in the middle of an entry.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), true);
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // Find the right amount to write; this evaluates the remaining bytes to
    // write and the remaining capacity of this child entry.
    int write_len =
        std::min(io_buf->BytesRemaining(), kMaxChildEntrySize - child_offset);

    // Keep a record of the last byte position (exclusive) in the child.
    int data_size = child->GetDataSize(kSparseData);

    if (net_log_.IsCapturing()) {
      NetLogSparseReadWrite(
          net_log_, net::NetLogEventType::SPARSE_WRITE_CHILD_DATA,
          net::NetLogEventPhase::BEGIN, child->net_log_.source(), write_len);
    }

    // Always writes to the child entry. This operation may overwrite data
    // previously written.
    // TODO(hclam): if there is data in the entry and this write is not
    // continuous we may want to discard this write.
    int ret = child->WriteData(kSparseData, child_offset, io_buf.get(),
                               write_len, CompletionOnceCallback(), true);
    if (net_log_.IsCapturing()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLogEventType::SPARSE_WRITE_CHILD_DATA, ret);
    }
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Keep a record of the first byte position in the child if the write was
    // neither aligned nor continuous. This is to enable writing to the middle
    // of an entry and still keep track of data off the aligned edge.
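    // For example, writing 100 bytes at global offset 4196 lands in child 1
    // at child_offset 100; if the child was previously empty (data_size == 0),
    // child_first_pos_ becomes 100, recording the hole at [0, 100).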
    if (data_size != child_offset)
      child->child_first_pos_ = child_offset;

    // Adjust the offset in the IO buffer.
    io_buf->DidConsume(ret);
  }

  UpdateStateOnUse(ENTRY_WAS_MODIFIED);
  return io_buf->BytesConsumed();
}

RangeResult MemEntryImpl::InternalGetAvailableRange(int64_t offset, int len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return RangeResult(net::ERR_CACHE_OPERATION_NOT_SUPPORTED);

  if (offset < 0 || len < 0)
    return RangeResult(net::ERR_INVALID_ARGUMENT);

  // Truncate |len| to make sure that |offset + len| does not overflow.
  // This is OK since one can't write that far anyway.
  // The result of std::min is guaranteed to fit into int since |len| did.
  len = std::min(static_cast<int64_t>(len),
                 std::numeric_limits<int64_t>::max() - offset);

  net::Interval<int64_t> requested(offset, offset + len);

  // Find the first relevant child, if any --- may have to skip over
  // one entry as it may be before the range (consider, for example,
  // if the request is for [2048, 10000), while [0, 1024) is a valid range
  // for the entry).
  EntryMap::const_iterator i = children_->lower_bound(ToChildIndex(offset));
  if (i != children_->cend() && !ChildInterval(i).Intersects(requested))
    ++i;
  net::Interval<int64_t> found;
  if (i != children_->cend() &&
      requested.Intersects(ChildInterval(i), &found)) {
    // Found something relevant; now just need to expand this out if next
    // children are contiguous and relevant to the request.
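    // For example, children covering [0, 4096) and [4096, 6000) merge into
    // one available range, while a child starting at 8192 would not, since
    // its start does not match found.max().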
    while (true) {
      ++i;
      net::Interval<int64_t> relevant_in_next_child;
      if (i == children_->cend() ||
          !requested.Intersects(ChildInterval(i), &relevant_in_next_child) ||
          relevant_in_next_child.min() != found.max()) {
        break;
      }

      found.SpanningUnion(relevant_in_next_child);
    }

    return RangeResult(found.min(), found.Length());
  }

  return RangeResult(offset, 0);
}

bool MemEntryImpl::InitSparseInfo() {
  DCHECK_EQ(EntryType::kParent, type());

  if (!children_) {
    // If we already have some data in the sparse stream but we are being
    // initialized as a sparse entry, we should fail.
    if (GetDataSize(kSparseData))
      return false;
    children_ = std::make_unique<EntryMap>();

    // The parent entry stores data for the first block, so save this object to
    // index 0.
    (*children_)[0] = this;
  }
  return true;
}

MemEntryImpl* MemEntryImpl::GetChild(int64_t offset, bool create) {
  DCHECK_EQ(EntryType::kParent, type());
  int64_t index = ToChildIndex(offset);
  auto i = children_->find(index);
  if (i != children_->end())
    return i->second;
  if (create)
    return new MemEntryImpl(backend_, index, this, net_log_.net_log());
  return nullptr;
}

net::Interval<int64_t> MemEntryImpl::ChildInterval(
    MemEntryImpl::EntryMap::const_iterator i) {
  DCHECK(i != children_->cend());
  const MemEntryImpl* child = i->second;
  // The valid range in the child is [child_first_pos_, DataSize): since the
  // child entry ops just use the standard disk_cache::Entry API, DataSize is
  // not aware of any hole in the beginning.
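  // For example, a child at index 2 with child_first_pos_ == 100 and a
  // 1000-byte sparse stream covers global bytes [8292, 9192).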
  int64_t child_responsibility_start = (i->first) * kMaxChildEntrySize;
  return net::Interval<int64_t>(
      child_responsibility_start + child->child_first_pos_,
      child_responsibility_start + child->GetDataSize(kSparseData));
}

void MemEntryImpl::Compact() {
  // Stream 0 should already be fine since it's written out in a single
  // WriteData().
  data_[1].shrink_to_fit();
  data_[2].shrink_to_fit();
}

}  // namespace disk_cache