// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/memory/mem_entry_impl.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <utility>

#include "base/check_op.h"
#include "base/format_macros.h"
#include "base/functional/bind.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_math.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "net/base/interval.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"

using base::Time;

namespace disk_cache {

namespace {

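// Index of the data stream that holds sparse data, both in the parent entry
// and in its children.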
const int kSparseData = 1;

// Maximum size of a child of a sparse entry is 2 to the power of this number.
const int kMaxChildEntryBits = 12;

// Sparse entry children have a maximum size of 4 KB.
const int kMaxChildEntrySize = 1 << kMaxChildEntryBits;

// Convert global offset to child index.
int64_t ToChildIndex(int64_t offset) {
  return offset >> kMaxChildEntryBits;
}

// Convert global offset to offset in child entry.
int ToChildOffset(int64_t offset) {
  return static_cast<int>(offset & (kMaxChildEntrySize - 1));
}
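
// For example, a global offset of 5000 falls in child 1 (5000 >> 12 == 1)
// at child offset 904 (5000 & 4095 == 904).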

// Returns a name for a child entry given the base_name of the parent and the
// child_id. This name is only used for logging purposes.
// If the entry is called entry_name, child entries will be named something
// like Range_entry_name:YYY where YYY is the number of the particular child.
std::string GenerateChildName(const std::string& base_name, int64_t child_id) {
  return base::StringPrintf("Range_%s:%" PRId64, base_name.c_str(), child_id);
}

// Returns NetLog parameters for the creation of a MemEntryImpl. A separate
// function is needed because child entries don't store their key().
base::Value::Dict NetLogEntryCreationParams(const MemEntryImpl* entry) {
  base::Value::Dict dict;
  std::string key;
  switch (entry->type()) {
    case MemEntryImpl::EntryType::kParent:
      key = entry->key();
      break;
    case MemEntryImpl::EntryType::kChild:
      key = GenerateChildName(entry->parent()->key(), entry->child_id());
      break;
  }
  dict.Set("key", key);
  dict.Set("created", true);
  return dict;
}

}  // namespace

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           const std::string& key,
                           net::NetLog* net_log)
    : MemEntryImpl(backend,
                   key,
                   0,        // child_id
                   nullptr,  // parent
                   net_log) {
  Open();
  // Just creating the entry (without any data) could cause the storage to
  // grow beyond capacity, but we allow such infractions.
  backend_->ModifyStorageSize(GetStorageSize());
}

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           int64_t child_id,
                           MemEntryImpl* parent,
                           net::NetLog* net_log)
    : MemEntryImpl(backend,
                   std::string(),  // key
                   child_id,
                   parent,
                   net_log) {
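  // Register the new child in the parent's map; the parent's destructor
  // dooms any children still present there.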
  (*parent_->children_)[child_id] = this;
}

void MemEntryImpl::Open() {
  // Only a parent entry can be opened.
  DCHECK_EQ(EntryType::kParent, type());
  CHECK_NE(ref_count_, std::numeric_limits<uint32_t>::max());
  ++ref_count_;
  DCHECK(!doomed_);
}

bool MemEntryImpl::InUse() const {
  if (type() == EntryType::kChild)
    return parent_->InUse();

  return ref_count_ > 0;
}

int MemEntryImpl::GetStorageSize() const {
  int storage_size = static_cast<int32_t>(key_.size());
  for (const auto& i : data_)
    storage_size += i.size();
  return storage_size;
}

void MemEntryImpl::UpdateStateOnUse(EntryModified modified_enum) {
  if (!doomed_ && backend_)
    backend_->OnEntryUpdated(this);

  last_used_ = MemBackendImpl::Now(backend_);
  if (modified_enum == ENTRY_WAS_MODIFIED)
    last_modified_ = last_used_;
}

void MemEntryImpl::Doom() {
  if (!doomed_) {
    doomed_ = true;
    if (backend_)
      backend_->OnEntryDoomed(this);
    net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
  }
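  // Entries are self-owned: a doomed entry with no remaining references
  // deletes itself.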
  if (!ref_count_)
    delete this;
}

void MemEntryImpl::Close() {
  DCHECK_EQ(EntryType::kParent, type());
  CHECK_GT(ref_count_, 0u);
  --ref_count_;
  if (ref_count_ == 0 && !doomed_) {
    // At this point the user is clearly done writing, so make sure there
    // isn't wastage due to exponential growth of the vector for the main
    // data stream.
    Compact();
    if (children_) {
      for (const auto& child_info : *children_) {
        if (child_info.second != this)
          child_info.second->Compact();
      }
    }
  }
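  // As in Doom(), delete a doomed entry once the last reference is gone.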
  if (!ref_count_ && doomed_)
    delete this;
}

std::string MemEntryImpl::GetKey() const {
  // A child entry doesn't have a key, so this method should not be called.
  DCHECK_EQ(EntryType::kParent, type());
  return key_;
}

Time MemEntryImpl::GetLastUsed() const {
  return last_used_;
}

Time MemEntryImpl::GetLastModified() const {
  return last_modified_;
}

int32_t MemEntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;
  return data_[index].size();
}

int MemEntryImpl::ReadData(int index,
                           int offset,
                           IOBuffer* buf,
                           int buf_len,
                           CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        false);
  }

  int result = InternalReadData(index, offset, buf, buf_len);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int MemEntryImpl::WriteData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback,
                            bool truncate) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        truncate);
  }

  int result = InternalWriteData(index, offset, buf, buf_len, truncate);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                            net::NetLogEventPhase::END, result);
  }

  return result;
}

int MemEntryImpl::ReadSparseData(int64_t offset,
                                 IOBuffer* buf,
                                 int buf_len,
                                 CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_READ,
                          net::NetLogEventPhase::BEGIN, offset, buf_len);
  }
  int result = InternalReadSparseData(offset, buf, buf_len);
  if (net_log_.IsCapturing())
    net_log_.EndEvent(net::NetLogEventType::SPARSE_READ);
  return result;
}

int MemEntryImpl::WriteSparseData(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_WRITE,
                          net::NetLogEventPhase::BEGIN, offset, buf_len);
  }
  int result = InternalWriteSparseData(offset, buf, buf_len);
  if (net_log_.IsCapturing())
    net_log_.EndEvent(net::NetLogEventType::SPARSE_WRITE);
  return result;
}

RangeResult MemEntryImpl::GetAvailableRange(int64_t offset,
                                            int len,
                                            RangeResultCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_GET_RANGE,
                          net::NetLogEventPhase::BEGIN, offset, len);
  }
  RangeResult result = InternalGetAvailableRange(offset, len);
  if (net_log_.IsCapturing()) {
    net_log_.EndEvent(net::NetLogEventType::SPARSE_GET_RANGE, [&] {
      return CreateNetLogGetAvailableRangeResultParams(result);
    });
  }
  return result;
}

bool MemEntryImpl::CouldBeSparse() const {
  DCHECK_EQ(EntryType::kParent, type());
  return (children_.get() != nullptr);
}

net::Error MemEntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
  return net::OK;
}

void MemEntryImpl::SetLastUsedTimeForTest(base::Time time) {
  last_used_ = time;
}

// ------------------------------------------------------------------------

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           const std::string& key,
                           int64_t child_id,
                           MemEntryImpl* parent,
                           net::NetLog* net_log)
    : key_(key),
      child_id_(child_id),
      parent_(parent),
      last_modified_(MemBackendImpl::Now(backend)),
      last_used_(last_modified_),
      backend_(backend) {
  backend_->OnEntryInserted(this);
  net_log_ = net::NetLogWithSource::Make(
      net_log, net::NetLogSourceType::MEMORY_CACHE_ENTRY);
  net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_MEM_ENTRY_IMPL,
                      [&] { return NetLogEntryCreationParams(this); });
}

MemEntryImpl::~MemEntryImpl() {
  if (backend_)
    backend_->ModifyStorageSize(-GetStorageSize());

  if (type() == EntryType::kParent) {
    if (children_) {
      EntryMap children;
      children_->swap(children);

      for (auto& it : children) {
        // Since |this| is stored in the map, it must be guarded against
        // double dooming, which would result in double destruction.
        if (it.second != this)
          it.second->Doom();
      }
    }
  } else {
    parent_->children_->erase(child_id_);
  }
  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_MEM_ENTRY_IMPL);
}

int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
                                   int buf_len) {
  DCHECK(type() == EntryType::kParent || index == kSparseData);

  if (index < 0 || index >= kNumStreams || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = data_[index].size();
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

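  // Clamp the read to the bytes available past |offset|; the CheckAdd guards
  // the sum against integer overflow.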
  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > entry_size)
    buf_len = entry_size - offset;

  UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
  std::copy(data_[index].begin() + offset,
            data_[index].begin() + offset + buf_len, buf->data());
  return buf_len;
}

int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
                                    int buf_len, bool truncate) {
  DCHECK(type() == EntryType::kParent || index == kSparseData);
  if (!backend_)
    return net::ERR_INSUFFICIENT_RESOURCES;

  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int max_file_size = backend_->MaxFileSize();

  int end_offset;
  if (offset > max_file_size || buf_len > max_file_size ||
      !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > max_file_size) {
    return net::ERR_FAILED;
  }

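  // Grow or shrink the stream as needed, updating the backend's size
  // accounting first so an over-budget write is rejected before any data
  // is copied.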
  int old_data_size = data_[index].size();
  if (truncate || old_data_size < end_offset) {
    int delta = end_offset - old_data_size;
    backend_->ModifyStorageSize(delta);
    if (backend_->HasExceededStorageSize()) {
      backend_->ModifyStorageSize(-delta);
      return net::ERR_INSUFFICIENT_RESOURCES;
    }

    data_[index].resize(end_offset);

    // Zero fill any hole.
    if (old_data_size < offset) {
      std::fill(data_[index].begin() + old_data_size,
                data_[index].begin() + offset, 0);
    }
  }

  UpdateStateOnUse(ENTRY_WAS_MODIFIED);

  if (!buf_len)
    return 0;

  std::copy(buf->data(), buf->data() + buf_len, data_[index].begin() + offset);
  return buf_len;
}

int MemEntryImpl::InternalReadSparseData(int64_t offset,
                                         IOBuffer* buf,
                                         int buf_len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // Ensure that offset + buf_len does not overflow. This ensures that
  // offset + io_buf->BytesConsumed() never overflows below.
  // The result of std::min is guaranteed to fit into int since buf_len did.
  buf_len = std::min(static_cast<int64_t>(buf_len),
                     std::numeric_limits<int64_t>::max() - offset);

  // We will keep using this buffer and adjust the offset within it.
  scoped_refptr<net::DrainableIOBuffer> io_buf =
      base::MakeRefCounted<net::DrainableIOBuffer>(buf, buf_len);

  // Iterate until we have read enough.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), false);

    // No child present for that offset.
    if (!child)
      break;

    // We then need to prepare the child offset and len.
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // If we are trying to read from a position where the child entry has no
    // data, we should stop.
    if (child_offset < child->child_first_pos_)
      break;
    if (net_log_.IsCapturing()) {
      NetLogSparseReadWrite(net_log_,
                            net::NetLogEventType::SPARSE_READ_CHILD_DATA,
                            net::NetLogEventPhase::BEGIN,
                            child->net_log_.source(), io_buf->BytesRemaining());
    }
    int ret =
        child->ReadData(kSparseData, child_offset, io_buf.get(),
                        io_buf->BytesRemaining(), CompletionOnceCallback());
    if (net_log_.IsCapturing()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLogEventType::SPARSE_READ_CHILD_DATA, ret);
    }

    // If we encounter an error in one entry, return immediately.
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Increment the counter by the number of bytes read from the child entry.
    io_buf->DidConsume(ret);
  }

  UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
  return io_buf->BytesConsumed();
}

int MemEntryImpl::InternalWriteSparseData(int64_t offset,
                                          IOBuffer* buf,
                                          int buf_len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // We can't generally do this without the backend since we need it to create
  // child entries.
  if (!backend_)
    return net::ERR_FAILED;

  // Check that offset + buf_len does not overflow. This ensures that
  // offset + io_buf->BytesConsumed() never overflows below.
  if (offset < 0 || buf_len < 0 || !base::CheckAdd(offset, buf_len).IsValid())
    return net::ERR_INVALID_ARGUMENT;

  scoped_refptr<net::DrainableIOBuffer> io_buf =
      base::MakeRefCounted<net::DrainableIOBuffer>(buf, buf_len);

  // This loop walks through child entries continuously starting from |offset|
  // and writes blocks of data (of maximum size kMaxChildEntrySize) into each
  // child entry until all |buf_len| bytes are written. The write operation can
  // start in the middle of an entry.
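  // For example, with 4 KB children, writing 6000 bytes at offset 3000
  // touches child 0 (bytes [3000, 4096)), child 1 ([4096, 8192)) and
  // child 2 ([8192, 9000)).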
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), true);
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // Find the right amount to write: the minimum of the remaining bytes to
    // write and the remaining capacity of this child entry.
    int write_len =
        std::min(io_buf->BytesRemaining(), kMaxChildEntrySize - child_offset);

    // Keep a record of the last byte position (exclusive) in the child.
    int data_size = child->GetDataSize(kSparseData);

    if (net_log_.IsCapturing()) {
      NetLogSparseReadWrite(
          net_log_, net::NetLogEventType::SPARSE_WRITE_CHILD_DATA,
          net::NetLogEventPhase::BEGIN, child->net_log_.source(), write_len);
    }

    // Always write to the child entry. This operation may overwrite data
    // previously written.
    // TODO(hclam): if there is data in the entry and this write is not
    // continuous we may want to discard this write.
    int ret = child->WriteData(kSparseData, child_offset, io_buf.get(),
                               write_len, CompletionOnceCallback(), true);
    if (net_log_.IsCapturing()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLogEventType::SPARSE_WRITE_CHILD_DATA, ret);
    }
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Keep a record of the first byte position in the child if the write was
    // not aligned nor continuous. This is to enable writing to the middle
    // of an entry and still keep track of data off the aligned edge.
    if (data_size != child_offset)
      child->child_first_pos_ = child_offset;

    // Adjust the offset in the IO buffer.
    io_buf->DidConsume(ret);
  }

  UpdateStateOnUse(ENTRY_WAS_MODIFIED);
  return io_buf->BytesConsumed();
}

RangeResult MemEntryImpl::InternalGetAvailableRange(int64_t offset, int len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return RangeResult(net::ERR_CACHE_OPERATION_NOT_SUPPORTED);

  if (offset < 0 || len < 0)
    return RangeResult(net::ERR_INVALID_ARGUMENT);

  // Truncate |len| to make sure that |offset + len| does not overflow.
  // This is OK since one can't write that far anyway.
  // The result of std::min is guaranteed to fit into int since |len| did.
  len = std::min(static_cast<int64_t>(len),
                 std::numeric_limits<int64_t>::max() - offset);

  net::Interval<int64_t> requested(offset, offset + len);

  // Find the first relevant child, if any --- may have to skip over
  // one entry as it may be before the range (consider, for example,
  // if the request is for [2048, 10000), while [0, 1024) is a valid range
  // for the entry).
  EntryMap::const_iterator i = children_->lower_bound(ToChildIndex(offset));
  if (i != children_->cend() && !ChildInterval(i).Intersects(requested))
    ++i;
  net::Interval<int64_t> found;
  if (i != children_->cend() &&
      requested.Intersects(ChildInterval(i), &found)) {
    // Found something relevant; now just need to expand this out if next
    // children are contiguous and relevant to the request.
    while (true) {
      ++i;
      net::Interval<int64_t> relevant_in_next_child;
      if (i == children_->cend() ||
          !requested.Intersects(ChildInterval(i), &relevant_in_next_child) ||
          relevant_in_next_child.min() != found.max()) {
        break;
      }

      found.SpanningUnion(relevant_in_next_child);
    }

    return RangeResult(found.min(), found.Length());
  }

  return RangeResult(offset, 0);
}

bool MemEntryImpl::InitSparseInfo() {
  DCHECK_EQ(EntryType::kParent, type());

  if (!children_) {
    // If we already have some data in the sparse stream but we are being
    // initialized as a sparse entry, we should fail.
    if (GetDataSize(kSparseData))
      return false;
    children_ = std::make_unique<EntryMap>();

    // The parent entry stores data for the first block, so save this object to
    // index 0.
    (*children_)[0] = this;
  }
  return true;
}

MemEntryImpl* MemEntryImpl::GetChild(int64_t offset, bool create) {
  DCHECK_EQ(EntryType::kParent, type());
  int64_t index = ToChildIndex(offset);
  auto i = children_->find(index);
  if (i != children_->end())
    return i->second;
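  // The child constructor registers the new entry in |children_|.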
  if (create)
    return new MemEntryImpl(backend_, index, this, net_log_.net_log());
  return nullptr;
}

net::Interval<int64_t> MemEntryImpl::ChildInterval(
    MemEntryImpl::EntryMap::const_iterator i) {
  DCHECK(i != children_->cend());
  const MemEntryImpl* child = i->second;
  // The valid range in the child is [child_first_pos_, DataSize): child entry
  // operations use the standard disk_cache::Entry API, so DataSize is not
  // aware of any hole at the beginning.
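  // For example, child index 2 with child_first_pos_ == 100 and a 200-byte
  // sparse stream covers the global interval [8292, 8392).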
  int64_t child_responsibility_start = (i->first) * kMaxChildEntrySize;
  return net::Interval<int64_t>(
      child_responsibility_start + child->child_first_pos_,
      child_responsibility_start + child->GetDataSize(kSparseData));
}

void MemEntryImpl::Compact() {
  // Stream 0 should already be fine since it's written out in a single
  // WriteData().
  data_[1].shrink_to_fit();
  data_[2].shrink_to_fit();
}

}  // namespace disk_cache