// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Defines the public interface of the disk cache. For more details see
// http://dev.chromium.org/developers/design-documents/network-stack/disk-cache

#ifndef NET_DISK_CACHE_DISK_CACHE_H_
#define NET_DISK_CACHE_DISK_CACHE_H_

#include <stdint.h>

#include <memory>
#include <string>
#include <vector>

#include "base/files/file.h"
#include "base/memory/ref_counted.h"
#include "base/strings/string_split.h"
#include "base/task/sequenced_task_runner.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "net/base/cache_type.h"
#include "net/base/completion_once_callback.h"
#include "net/base/net_errors.h"
#include "net/base/net_export.h"
#include "net/base/request_priority.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

namespace base {
class FilePath;

namespace android {
class ApplicationStatusListener;
}  // namespace android

}  // namespace base

namespace net {
class IOBuffer;
class NetLog;
}  // namespace net

namespace disk_cache {

// Forward declarations and callback aliases used throughout this header.
class Entry;
class Backend;
class EntryResult;
class BackendFileOperationsFactory;
struct RangeResult;
using EntryResultCallback = base::OnceCallback<void(EntryResult)>;
using RangeResultCallback = base::OnceCallback<void(const RangeResult&)>;

// How to handle resetting the back-end cache from the previous session.
// See CreateCacheBackend() for its usage.
enum class ResetHandling { kReset, kResetOnError, kNeverReset };

// Bundles the outcome of a backend-creation attempt: a net error code plus,
// on success, the created backend itself. Move-only.
struct NET_EXPORT BackendResult {
  BackendResult();
  ~BackendResult();
  BackendResult(BackendResult&&);
  BackendResult& operator=(BackendResult&&);

  BackendResult(const BackendResult&) = delete;
  BackendResult& operator=(const BackendResult&) = delete;

  // `error_in` should not be net::OK for MakeError().
  static BackendResult MakeError(net::Error error_in);
  // `backend_in` should not be nullptr for Make().
  static BackendResult Make(std::unique_ptr<Backend> backend_in);

  net::Error net_error = net::ERR_FAILED;
  std::unique_ptr<Backend> backend;
};

using BackendResultCallback = base::OnceCallback<void(BackendResult)>;

// Returns an instance of a Backend of the given `type`. `file_operations`
// (nullable) is used to broker file operations in sandboxed environments.
// Currently `file_operations` is only used for the simple backend.
// `path` points to a folder where the cached data will be stored (if
// appropriate). This cache instance must be the only object that will be
// reading or writing files to that folder (if another one exists, and `type` is
// not net::DISK_CACHE this operation will not complete until the previous
// duplicate gets destroyed and finishes all I/O). The returned object should be
// deleted when not needed anymore.
//
// If `reset_handling` is set to kResetOnError and there is a problem with the
// cache initialization, the files will be deleted and a new set will be
// created. If it's set to kReset, this will happen even if there isn't a
// problem with cache initialization. Finally, if it's set to kNeverReset, the
// cache creation will fail if there is a problem with cache initialization.
//
// `max_bytes` is the maximum size the cache can grow to. If zero is passed in
// as `max_bytes`, the cache will determine the value to use.
//
// `net_error` in return value of the function is a net error code. If it is
// ERR_IO_PENDING, the `callback` will be invoked when a backend is available or
// a fatal error condition is reached. `backend` in return value or parameter
// to callback can be nullptr if a fatal error is found.
NET_EXPORT BackendResult
CreateCacheBackend(net::CacheType type,
                   net::BackendType backend_type,
                   scoped_refptr<BackendFileOperationsFactory> file_operations,
                   const base::FilePath& path,
                   int64_t max_bytes,
                   ResetHandling reset_handling,
                   net::NetLog* net_log,
                   BackendResultCallback callback);

#if BUILDFLAG(IS_ANDROID)
// Similar to the function above, but takes an |app_status_listener| which is
// used to listen for when the Android application status changes, so we can
// flush the cache to disk when the app goes to the background.
NET_EXPORT BackendResult CreateCacheBackend(
    net::CacheType type,
    net::BackendType backend_type,
    scoped_refptr<BackendFileOperationsFactory> file_operations,
    const base::FilePath& path,
    int64_t max_bytes,
    ResetHandling reset_handling,
    net::NetLog* net_log,
    BackendResultCallback callback,
    base::android::ApplicationStatusListener* app_status_listener);
#endif

// Variant of the above that calls |post_cleanup_callback| once all the I/O
// that was in flight has completed post-destruction. |post_cleanup_callback|
// will get invoked even if the creation fails. The invocation will always be
// via the event loop, and never direct.
//
// This is currently unsupported for |type| == net::DISK_CACHE.
//
// Note that this will not wait for |post_cleanup_callback| of a previous
// instance for |path| to run.
136 NET_EXPORT BackendResult 137 CreateCacheBackend(net::CacheType type, 138 net::BackendType backend_type, 139 scoped_refptr<BackendFileOperationsFactory> file_operations, 140 const base::FilePath& path, 141 int64_t max_bytes, 142 ResetHandling reset_handling, 143 net::NetLog* net_log, 144 base::OnceClosure post_cleanup_callback, 145 BackendResultCallback callback); 146 147 // This will flush any internal threads used by backends created w/o an 148 // externally injected thread specified, so tests can be sure that all I/O 149 // has finished before inspecting the world. 150 NET_EXPORT void FlushCacheThreadForTesting(); 151 152 // Async version of FlushCacheThreadForTesting. `callback` will be called on 153 // the calling sequence. 154 NET_EXPORT void FlushCacheThreadAsynchronouslyForTesting( 155 base::OnceClosure cllback); 156 157 // The root interface for a disk cache instance. 158 class NET_EXPORT Backend { 159 public: 160 using CompletionOnceCallback = net::CompletionOnceCallback; 161 using Int64CompletionOnceCallback = net::Int64CompletionOnceCallback; 162 using EntryResultCallback = disk_cache::EntryResultCallback; 163 using EntryResult = disk_cache::EntryResult; 164 165 class Iterator { 166 public: 167 virtual ~Iterator() = default; 168 169 // OpenNextEntry returns a result with net_error() |net::OK| and provided 170 // entry if there is an entry to enumerate which it can return immediately. 171 // It returns a result with net_error() |net::ERR_FAILED| at the end of 172 // enumeration. If the function returns a result with net_error() 173 // |net::ERR_IO_PENDING|, then the final result will be passed to the 174 // provided |callback|, otherwise |callback| will not be called. If any 175 // entry in the cache is modified during iteration, the result of this 176 // function is thereafter undefined. 177 // 178 // Calling OpenNextEntry after the backend which created it is destroyed 179 // may fail with |net::ERR_FAILED|; however it should not crash. 
    //
    // Some cache backends make stronger guarantees about mutation during
    // iteration, see top comment in simple_backend_impl.h for details.
    virtual EntryResult OpenNextEntry(EntryResultCallback callback) = 0;
  };

  // If the backend is destroyed when there are operations in progress (any
  // callback that has not been invoked yet), this method cancels said
  // operations so the callbacks are not invoked, possibly leaving the work
  // half way (for instance, dooming just a few entries). Note that pending IO
  // for a given Entry (as opposed to the Backend) will still generate a
  // callback.
  // Warning: there is some inconsistency in details between different backends
  // on what will succeed and what will fail. In particular the blockfile
  // backend will leak entries closed after backend deletion, while others
  // handle it properly.
  explicit Backend(net::CacheType cache_type) : cache_type_(cache_type) {}
  virtual ~Backend() = default;

  // Returns the type of this cache.
  net::CacheType GetCacheType() const { return cache_type_; }

  // Returns the number of entries in the cache.
  virtual int32_t GetEntryCount() const = 0;

  // Atomically attempts to open an existing entry based on |key| or, if none
  // already exists, to create a new entry. Returns an EntryResult object,
  // which contains 1) network error code; 2) if the error code is OK,
  // an owning pointer to either a preexisting or a newly created
  // entry; 3) a bool indicating if the entry was opened or not. When the entry
  // pointer is no longer needed, its Close() method should be called. If this
  // method's return value has net_error() == ERR_IO_PENDING, the
  // |callback| will be invoked when the entry is available. The |priority| of
  // the entry determines its priority in the background worker pools.
214 // 215 // This method should be the preferred way to obtain an entry over using 216 // OpenEntry() or CreateEntry() separately in order to simplify consumer 217 // logic. 218 virtual EntryResult OpenOrCreateEntry(const std::string& key, 219 net::RequestPriority priority, 220 EntryResultCallback callback) = 0; 221 222 // Opens an existing entry, returning status code, and, if successful, an 223 // entry pointer packaged up into an EntryResult. If return value's 224 // net_error() is ERR_IO_PENDING, the |callback| will be invoked when the 225 // entry is available. The |priority| of the entry determines its priority in 226 // the background worker pools. 227 virtual EntryResult OpenEntry(const std::string& key, 228 net::RequestPriority priority, 229 EntryResultCallback) = 0; 230 231 // Creates a new entry, returning status code, and, if successful, and 232 // an entry pointer packaged up into an EntryResult. If return value's 233 // net_error() is ERR_IO_PENDING, the |callback| will be invoked when the 234 // entry is available. The |priority| of the entry determines its priority in 235 // the background worker pools. 236 virtual EntryResult CreateEntry(const std::string& key, 237 net::RequestPriority priority, 238 EntryResultCallback callback) = 0; 239 240 // Marks the entry, specified by the given key, for deletion. The return value 241 // is a net error code. If this method returns ERR_IO_PENDING, the |callback| 242 // will be invoked after the entry is doomed. 243 virtual net::Error DoomEntry(const std::string& key, 244 net::RequestPriority priority, 245 CompletionOnceCallback callback) = 0; 246 247 // Marks all entries for deletion. The return value is a net error code. If 248 // this method returns ERR_IO_PENDING, the |callback| will be invoked when the 249 // operation completes. 250 virtual net::Error DoomAllEntries(CompletionOnceCallback callback) = 0; 251 252 // Marks a range of entries for deletion. 
  // This supports unbounded deletes in
  // either direction by using null Time values for either argument. The return
  // value is a net error code. If this method returns ERR_IO_PENDING, the
  // |callback| will be invoked when the operation completes.
  // Entries with |initial_time| <= access time < |end_time| are deleted.
  virtual net::Error DoomEntriesBetween(base::Time initial_time,
                                        base::Time end_time,
                                        CompletionOnceCallback callback) = 0;

  // Marks all entries accessed since |initial_time| for deletion. The return
  // value is a net error code. If this method returns ERR_IO_PENDING, the
  // |callback| will be invoked when the operation completes.
  // Entries with |initial_time| <= access time are deleted.
  virtual net::Error DoomEntriesSince(base::Time initial_time,
                                      CompletionOnceCallback callback) = 0;

  // Calculate the total size of the cache. The return value is the size in
  // bytes or a net error code. If this method returns ERR_IO_PENDING,
  // the |callback| will be invoked when the operation completes.
  virtual int64_t CalculateSizeOfAllEntries(
      Int64CompletionOnceCallback callback) = 0;

  // Calculate the size of all cache entries accessed between |initial_time| and
  // |end_time|.
  // The return value is the size in bytes or a net error code. The default
  // implementation returns ERR_NOT_IMPLEMENTED and should only be overwritten
  // if there is an efficient way for the backend to determine the size for a
  // subset of the cache without reading the whole cache from disk.
  // If this method returns ERR_IO_PENDING, the |callback| will be invoked when
  // the operation completes.
  virtual int64_t CalculateSizeOfEntriesBetween(
      base::Time initial_time,
      base::Time end_time,
      Int64CompletionOnceCallback callback);

  // Returns an iterator which will enumerate all entries of the cache in an
  // undefined order.
  virtual std::unique_ptr<Iterator> CreateIterator() = 0;

  // Return a list of cache statistics.
  virtual void GetStats(base::StringPairs* stats) = 0;

  // Called whenever an external cache in the system reuses the resource
  // referred to by |key|.
  virtual void OnExternalCacheHit(const std::string& key) = 0;

  // Backends can optionally permit one to store, probabilistically, up to a
  // byte associated with a key of an existing entry in memory.

  // GetEntryInMemoryData has the following behavior:
  // - If the data is not available at this time for any reason, returns 0.
  // - Otherwise, returns a value that was with very high probability
  //   given to SetEntryInMemoryData(|key|) (and with a very low probability
  //   to a different key that collides in the in-memory index).
  //
  // Due to the probability of collisions, including those that can be induced
  // by hostile 3rd parties, this interface should not be used to make decisions
  // that affect correctness (especially security).
  virtual uint8_t GetEntryInMemoryData(const std::string& key);
  virtual void SetEntryInMemoryData(const std::string& key, uint8_t data);

  // Returns the maximum length an individual stream can have.
  virtual int64_t MaxFileSize() const = 0;

 private:
  const net::CacheType cache_type_;
};

// This interface represents an entry in the disk cache.
class NET_EXPORT Entry {
 public:
  using CompletionOnceCallback = net::CompletionOnceCallback;
  using IOBuffer = net::IOBuffer;
  using RangeResultCallback = disk_cache::RangeResultCallback;
  using RangeResult = disk_cache::RangeResult;

  // Marks this cache entry for deletion.
  virtual void Doom() = 0;

  // Releases this entry. Calling this method does not cancel pending IO
  // operations on this entry.
  // Even after the last reference to this object has
  // been released, pending completion callbacks may be invoked.
  virtual void Close() = 0;

  // Returns the key associated with this cache entry.
  virtual std::string GetKey() const = 0;

  // Returns the time when this cache entry was last used.
  virtual base::Time GetLastUsed() const = 0;

  // Returns the time when this cache entry was last modified.
  virtual base::Time GetLastModified() const = 0;

  // Returns the size of the cache data with the given index.
  virtual int32_t GetDataSize(int index) const = 0;

  // Copies cached data into the given buffer of length |buf_len|. Returns the
  // number of bytes read or a network error code. If this function returns
  // ERR_IO_PENDING, the completion callback will be called on the current
  // thread when the operation completes, and a reference to |buf| will be
  // retained until the callback is called. Note that as long as the function
  // does not complete immediately, the callback will always be invoked, even
  // after Close has been called; in other words, the caller may close this
  // entry without having to wait for all the callbacks, and still rely on the
  // cleanup performed from the callback code.
  virtual int ReadData(int index,
                       int offset,
                       IOBuffer* buf,
                       int buf_len,
                       CompletionOnceCallback callback) = 0;

  // Copies data from the given buffer of length |buf_len| into the cache.
  // Returns the number of bytes written or a network error code. If this
  // function returns ERR_IO_PENDING, the completion callback will be called
  // on the current thread when the operation completes, and a reference to
  // |buf| will be retained until the callback is called.
  // Note that as long as
  // the function does not complete immediately, the callback will always be
  // invoked, even after Close has been called; in other words, the caller may
  // close this entry without having to wait for all the callbacks, and still
  // rely on the cleanup performed from the callback code.
  // If truncate is true, this call will truncate the stored data at the end of
  // what we are writing here.
  virtual int WriteData(int index,
                        int offset,
                        IOBuffer* buf,
                        int buf_len,
                        CompletionOnceCallback callback,
                        bool truncate) = 0;

  // Sparse entries support:
  //
  // A Backend implementation can support sparse entries, so the cache keeps
  // track of which parts of the entry have been written before. The backend
  // will never return data that was not written previously, so reading from
  // such region will return 0 bytes read (or actually the number of bytes read
  // before reaching that region).
  //
  // There are only two streams for sparse entries: a regular control stream
  // (index 0) that must be accessed through the regular API (ReadData and
  // WriteData), and one sparse stream that must be accessed through the sparse-
  // aware API that follows. Calling a non-sparse aware method with an index
  // argument other than 0 is a mistake that results in implementation specific
  // behavior. Using a sparse-aware method with an entry that was not stored
  // using the same API, or with a backend that doesn't support sparse entries
  // will return ERR_CACHE_OPERATION_NOT_SUPPORTED.
  //
  // The storage granularity of the implementation should be at least 1 KB. In
  // other words, storing less than 1 KB may result in an implementation
  // dropping the data completely, and writing at offsets not aligned with 1 KB,
  // or with lengths not a multiple of 1 KB may result in the first or last part
  // of the data being discarded.
  // However, two consecutive writes should not
  // result in a hole in between the two parts as long as they are sequential
  // (the second one starts where the first one ended), and there is no other
  // write between them.
  //
  // The Backend implementation is free to evict any range from the cache at any
  // moment, so in practice, the previously stated granularity of 1 KB is not
  // as bad as it sounds.
  //
  // The sparse methods don't support multiple simultaneous IO operations to the
  // same physical entry, so in practice a single object should be instantiated
  // for a given key at any given time. Once an operation has been issued, the
  // caller should wait until it completes before starting another one. This
  // requirement includes the case when an entry is closed while some operation
  // is in progress and another object is instantiated; any IO operation will
  // fail while the previous operation is still in-flight. In order to deal with
  // this requirement, the caller could either wait until the operation
  // completes before closing the entry, or call CancelSparseIO() before closing
  // the entry, and call ReadyForSparseIO() on the new entry and wait for the
  // callback before issuing new operations.

  // Behaves like ReadData() except that this method is used to access sparse
  // entries.
  virtual int ReadSparseData(int64_t offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback) = 0;

  // Behaves like WriteData() except that this method is used to access sparse
  // entries. |truncate| is not part of this interface because a sparse entry
  // is not expected to be reused with new data. To delete the old data and
  // start again, or to reduce the total size of the stream data (which implies
  // that the content has changed), the whole entry should be doomed and
  // re-created.
  virtual int WriteSparseData(int64_t offset,
                              IOBuffer* buf,
                              int buf_len,
                              CompletionOnceCallback callback) = 0;

  // Returns information about the currently stored portion of a sparse entry.
  // |offset| and |len| describe a particular range that should be scanned to
  // find out if it is stored or not. Please see the documentation of
  // RangeResult for more details.
  virtual RangeResult GetAvailableRange(int64_t offset,
                                        int len,
                                        RangeResultCallback callback) = 0;

  // Returns true if this entry could be a sparse entry or false otherwise. This
  // is a quick test that may return true even if the entry is not really
  // sparse. This method doesn't modify the state of this entry (it will not
  // create sparse tracking data). GetAvailableRange or ReadSparseData can be
  // used to perform a definitive test of whether an existing entry is sparse or
  // not, but that method may modify the current state of the entry (making it
  // sparse, for instance). The purpose of this method is to test an existing
  // entry, but without generating actual IO to perform a thorough check.
  virtual bool CouldBeSparse() const = 0;

  // Cancels any pending sparse IO operation (if any). The completion callback
  // of the operation in question will still be called when the operation
  // finishes, but the operation will finish sooner when this method is used.
  virtual void CancelSparseIO() = 0;

  // Returns OK if this entry can be used immediately. If that is not the
  // case, returns ERR_IO_PENDING and invokes the provided callback when this
  // entry is ready to use. This method always returns OK for non-sparse
  // entries, and returns ERR_IO_PENDING when a previous operation was cancelled
  // (by calling CancelSparseIO), but the cache is still busy with it.
If there 469 // is a pending operation that has not been cancelled, this method will return 470 // OK although another IO operation cannot be issued at this time; in this 471 // case the caller should just wait for the regular callback to be invoked 472 // instead of using this method to provide another callback. 473 // 474 // Note that CancelSparseIO may have been called on another instance of this 475 // object that refers to the same physical disk entry. 476 // Note: This method is deprecated. 477 virtual net::Error ReadyForSparseIO(CompletionOnceCallback callback) = 0; 478 479 // Used in tests to set the last used time. Note that backend might have 480 // limited precision. Also note that this call may modify the last modified 481 // time. 482 virtual void SetLastUsedTimeForTest(base::Time time) = 0; 483 484 protected: 485 virtual ~Entry() = default; 486 }; 487 488 struct EntryDeleter { operatorEntryDeleter489 void operator()(Entry* entry) { 490 // Note that |entry| is ref-counted. 491 entry->Close(); 492 } 493 }; 494 495 // Automatically closes an entry when it goes out of scope. 496 // Warning: Be careful. Automatically closing may not be the desired behavior 497 // when writing to an entry. You may wish to doom first (e.g., in case writing 498 // hasn't yet completed but the browser is shutting down). 499 typedef std::unique_ptr<Entry, EntryDeleter> ScopedEntryPtr; 500 501 // Represents the result of an entry open or create operation. 502 // This is a move-only, owning type, which will close the entry it owns unless 503 // it's released from it via ReleaseEntry (or it's moved away from). 504 class NET_EXPORT EntryResult { 505 public: 506 EntryResult(); 507 ~EntryResult(); 508 EntryResult(EntryResult&&); 509 EntryResult& operator=(EntryResult&&); 510 511 EntryResult(const EntryResult&) = delete; 512 EntryResult& operator=(const EntryResult&) = delete; 513 514 // Creates an entry result representing successfully opened (pre-existing) 515 // cache entry. 
  // |new_entry| must be non-null.
  static EntryResult MakeOpened(Entry* new_entry);

  // Creates an entry result representing successfully created (new)
  // cache entry. |new_entry| must be non-null.
  static EntryResult MakeCreated(Entry* new_entry);

  // Creates an entry result representing an error. Status must not be net::OK.
  static EntryResult MakeError(net::Error status);

  // Relinquishes ownership of the entry, and returns a pointer to it.
  // Will return nullptr if there is no such entry.
  // WARNING: clears net_error() to ERR_FAILED, opened() to false.
  Entry* ReleaseEntry();

  // ReleaseEntry() will return a non-null pointer if and only if this is
  // net::OK before the call to it.
  net::Error net_error() const { return net_error_; }

  // Returns true if an existing entry was opened rather than a new one created.
  // Implies net_error() == net::OK and non-null entry.
  bool opened() const { return opened_; }

 private:
  // Invariant to keep: |entry_| != nullptr iff |net_error_| == net::OK;
  // |opened_| set only if entry is set.
  net::Error net_error_ = net::ERR_FAILED;
  bool opened_ = false;
  ScopedEntryPtr entry_;
};

// Represents a result of GetAvailableRange.
struct NET_EXPORT RangeResult {
  RangeResult() = default;
  explicit RangeResult(net::Error error) : net_error(error) {}

  RangeResult(int64_t start, int available_len)
      : net_error(net::OK), start(start), available_len(available_len) {}

  // This is net::OK if operation succeeded, and `start` and `available_len`
  // were set appropriately (potentially with 0 for `available_len`).
  //
  // In return value of GetAvailableRange(), net::ERR_IO_PENDING means that the
  // result will be provided asynchronously via the callback. This can not occur
  // in the value passed to the callback itself.
  //
  // In case the operation failed, this will be the error code.
  net::Error net_error = net::ERR_FAILED;

  // First byte within the range passed to GetAvailableRange that's available
  // in the cache entry.
  //
  // Valid iff net_error is net::OK.
  int64_t start = -1;

  // Number of consecutive bytes stored within the requested range starting from
  // `start` that can be read at once. This may be zero.
  //
  // Valid iff net_error is net::OK.
  int available_len = 0;
};

// The maximum size of cache that can be created for type
// GENERATED_WEBUI_BYTE_CODE_CACHE. There are only a handful of commonly
// accessed WebUI pages, which can each cache 0.5 - 1.5 MB of code. There is no
// point in having a very large WebUI code cache, even if lots of disk space is
// available.
constexpr int kMaxWebUICodeCacheSize = 5 * 1024 * 1024;

class UnboundBackendFileOperations;

// An interface to provide file operations so that the HTTP cache works on
// a sandboxed process.
// All the paths must be absolute paths.
// A BackendFileOperations object is bound to a sequence.
class BackendFileOperations {
 public:
  // A single result produced by FileEnumerator: a file's path, size, and
  // timestamps.
  struct FileEnumerationEntry {
    FileEnumerationEntry() = default;
    FileEnumerationEntry(base::FilePath path,
                         int64_t size,
                         base::Time last_accessed,
                         base::Time last_modified)
        : path(std::move(path)),
          size(size),
          last_accessed(last_accessed),
          last_modified(last_modified) {}

    base::FilePath path;
    int64_t size = 0;
    base::Time last_accessed;
    base::Time last_modified;
  };

  // An enum representing the mode for DeleteFile function.
  enum class DeleteFileMode {
    // The default mode, meaning base::DeleteFile.
    kDefault,
    // Ensure that new files for the same name can be created immediately after
    // deletion. Note that this is the default behavior on POSIX.
    // On Windows
    // this assumes that all the file handles for the file to be deleted are
    // opened with FLAG_WIN_SHARE_DELETE.
    kEnsureImmediateAvailability,
  };

  // An interface to enumerate files in a directory.
  // Indirect descendants are not listed, and directories are not listed.
  class FileEnumerator {
   public:
    virtual ~FileEnumerator() = default;

    // Returns the next file in the directory, if any. Returns nullopt if there
    // are no further files (including the error case). The path of the
    // returned entry should be a full path.
    virtual absl::optional<FileEnumerationEntry> Next() = 0;

    // Returns true if we've found an error during traversal.
    virtual bool HasError() const = 0;
  };

  virtual ~BackendFileOperations() = default;

  // Creates a directory with the given path and returns whether that succeeded.
  virtual bool CreateDirectory(const base::FilePath& path) = 0;

  // Returns true if the given path exists on the local filesystem.
  virtual bool PathExists(const base::FilePath& path) = 0;

  // Returns true if the given path exists on the local filesystem and it's a
  // directory.
  virtual bool DirectoryExists(const base::FilePath& path) = 0;

  // Opens a file with the given path and flags. Returns the opened file.
  virtual base::File OpenFile(const base::FilePath& path, uint32_t flags) = 0;

  // Deletes a file with the given path and returns whether that succeeded.
  virtual bool DeleteFile(const base::FilePath& path,
                          DeleteFileMode mode = DeleteFileMode::kDefault) = 0;

  // Renames a file `from_path` to `to_path`. Returns the error information.
  virtual bool ReplaceFile(const base::FilePath& from_path,
                           const base::FilePath& to_path,
                           base::File::Error* error) = 0;

  // Returns information about the given path.
  virtual absl::optional<base::File::Info> GetFileInfo(
      const base::FilePath& path) = 0;

  // Creates an object that can be used to enumerate files in the specified
  // directory.
  virtual std::unique_ptr<FileEnumerator> EnumerateFiles(
      const base::FilePath& path) = 0;

  // Deletes the given directory recursively, asynchronously. `callback` will
  // be called with whether the operation succeeded.
  // This is done by:
  // 1. Renaming the directory to another directory,
  // 2. Calling `callback` with the result, and
  // 3. Deleting the directory.
  // This means the caller won't know the result of 3.
  virtual void CleanupDirectory(const base::FilePath& path,
                                base::OnceCallback<void(bool)> callback) = 0;

  // Unbinds this object from the sequence, and returns an
  // UnboundBackendFileOperations which can be bound to any sequence. Once
  // this method is called, no methods (except for the destructor) on this
  // object may be called.
  virtual std::unique_ptr<UnboundBackendFileOperations> Unbind() = 0;
};

// BackendFileOperations which is not yet bound to a sequence.
class UnboundBackendFileOperations {
 public:
  virtual ~UnboundBackendFileOperations() = default;

  // This can be called at most once.
  virtual std::unique_ptr<BackendFileOperations> Bind(
      scoped_refptr<base::SequencedTaskRunner> task_runner) = 0;
};

// A factory interface that creates BackendFileOperations.
class BackendFileOperationsFactory
    : public base::RefCounted<BackendFileOperationsFactory> {
 public:
  // Creates a BackendFileOperations which is bound to `task_runner`.
  virtual std::unique_ptr<BackendFileOperations> Create(
      scoped_refptr<base::SequencedTaskRunner> task_runner) = 0;

  // Creates an "unbound" BackendFileOperations.
  virtual std::unique_ptr<UnboundBackendFileOperations> CreateUnbound() = 0;

 protected:
  friend class base::RefCounted<BackendFileOperationsFactory>;
  virtual ~BackendFileOperationsFactory() = default;
};

// A trivial BackendFileOperations implementation which uses corresponding
// base functions.
class NET_EXPORT TrivialFileOperations final : public BackendFileOperations {
 public:
  TrivialFileOperations();
  ~TrivialFileOperations() override;

  // BackendFileOperations implementation:
  bool CreateDirectory(const base::FilePath& path) override;
  bool PathExists(const base::FilePath& path) override;
  bool DirectoryExists(const base::FilePath& path) override;
  base::File OpenFile(const base::FilePath& path, uint32_t flags) override;
  bool DeleteFile(const base::FilePath& path, DeleteFileMode mode) override;
  bool ReplaceFile(const base::FilePath& from_path,
                   const base::FilePath& to_path,
                   base::File::Error* error) override;
  absl::optional<base::File::Info> GetFileInfo(
      const base::FilePath& path) override;
  std::unique_ptr<FileEnumerator> EnumerateFiles(
      const base::FilePath& path) override;
  void CleanupDirectory(const base::FilePath& path,
                        base::OnceCallback<void(bool)> callback) override;
  std::unique_ptr<UnboundBackendFileOperations> Unbind() override;

 private:
  SEQUENCE_CHECKER(sequence_checker_);
#if DCHECK_IS_ON()
  bool bound_ = true;
#endif
};

// A factory producing TrivialFileOperations instances.
class NET_EXPORT TrivialFileOperationsFactory
    : public BackendFileOperationsFactory {
 public:
  TrivialFileOperationsFactory();

  // BackendFileOperationsFactory implementation:
  std::unique_ptr<BackendFileOperations> Create(
      scoped_refptr<base::SequencedTaskRunner> task_runner) override;
  std::unique_ptr<UnboundBackendFileOperations> CreateUnbound() override;

 private:
  ~TrivialFileOperationsFactory() override;

  SEQUENCE_CHECKER(sequence_checker_);
};

}  // namespace disk_cache

#endif  // NET_DISK_CACHE_DISK_CACHE_H_