/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_LIB_IO_CACHE_H_
#define TENSORFLOW_CORE_LIB_IO_CACHE_H_

#include <cstddef>
#include <cstdint>

#include "tensorflow/core/platform/stringpiece.h"

// A Cache is an interface that maps keys to values.  It has internal
// synchronization and may be safely accessed concurrently from
// multiple threads.  It may automatically evict entries to make room
// for new entries.  Values have a specified charge against the cache
// capacity.  For example, a cache where the values are variable
// length strings may use the length of the string as the charge for
// the string.
//
// A builtin cache implementation with a least-recently-used eviction
// policy is provided.  Clients may use their own implementations if
// they want something more sophisticated (like scan-resistance, a
// custom eviction policy, variable cache sizing, etc.)

namespace tensorflow {

using Slice = StringPiece;

namespace table {

class Cache;

// Create a new cache with a fixed size capacity.  This implementation
// of Cache uses a least-recently-used eviction policy.
Cache* NewLRUCache(size_t capacity);
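//
// For example (illustrative sketch, not part of this header): an 8 MB
// cache owned by the caller and destroyed once all users are done.
//
//   Cache* cache = NewLRUCache(8 * 1048576);  // capacity in charge units
//   ...
//   delete cache;  // destroys remaining entries via their deleters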

class Cache {
 public:
  Cache() = default;

  Cache(const Cache&) = delete;
  Cache& operator=(const Cache&) = delete;

  // Destroys all existing entries by calling the "deleter"
  // function that was passed when each entry was inserted.
  virtual ~Cache();

  // Opaque handle to an entry stored in the cache.
  struct Handle {};

  // Insert a mapping from key->value into the cache and assign it
  // the specified charge against the total cache capacity.
  //
  // Returns a handle that corresponds to the mapping.  The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  //
  // When the inserted entry is no longer needed, the key and
  // value will be passed to "deleter".
  virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                         void (*deleter)(const Slice& key, void* value)) = 0;
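  //
  // Example (illustrative sketch; the std::string value type and the
  // "DeleteString" deleter below are hypothetical, not part of this API):
  //
  //   static void DeleteString(const Slice& /*key*/, void* value) {
  //     delete static_cast<std::string*>(value);
  //   }
  //
  //   auto* s = new std::string("payload");
  //   Cache::Handle* h = cache->Insert("key", s, s->size(), &DeleteString);
  //   // ... use cache->Value(h) ...
  //   cache->Release(h);  // drop the handle once done with the mapping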

  // If the cache has no mapping for "key", returns nullptr.
  //
  // Else return a handle that corresponds to the mapping.  The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  virtual Handle* Lookup(const Slice& key) = 0;
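  //
  // Example (illustrative sketch): the usual Lookup/Value/Release pattern,
  // assuming values were inserted as std::string* as in the sketch above.
  //
  //   Cache::Handle* h = cache->Lookup("key");
  //   if (h != nullptr) {
  //     auto* s = static_cast<std::string*>(cache->Value(h));
  //     // ... use *s ...
  //     cache->Release(h);  // must balance every successful Lookup()
  //   }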

  // Release a mapping returned by a previous Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void Release(Handle* handle) = 0;

  // Return the value encapsulated in a handle returned by a
  // successful Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void* Value(Handle* handle) = 0;

  // If the cache contains an entry for "key", erase it.  Note that the
  // underlying entry will be kept around until all existing handles
  // to it have been released.
  virtual void Erase(const Slice& key) = 0;
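  //
  // Example (illustrative sketch): erasing a key while a handle is
  // outstanding; the deleter runs only after the last handle is released.
  //
  //   Cache::Handle* h = cache->Lookup("key");  // pins the entry
  //   cache->Erase("key");         // removes the mapping; deleter deferred
  //   void* v = cache->Value(h);   // still valid to use
  //   cache->Release(h);           // last handle gone; deleter runs now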

  // Return a new numeric id.  May be used by multiple clients who are
  // sharing the same cache to partition the key space.  Typically the
  // client will allocate a new id at startup and prepend the id to
  // its cache keys.
  virtual uint64_t NewId() = 0;
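  //
  // Example (illustrative sketch; "user_key" is assumed to be a Slice):
  // prefixing keys with a per-client id so two clients sharing one cache
  // cannot collide.
  //
  //   uint64_t id = cache->NewId();  // once, at client startup
  //   std::string key;
  //   key.append(reinterpret_cast<const char*>(&id), sizeof(id));
  //   key.append(user_key.data(), user_key.size());
  //   // key is now unique to this client; use it with Insert()/Lookup()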

  // Remove all cache entries that are not actively in use.  Memory-constrained
  // applications may wish to call this method to reduce memory usage.
  // The default implementation of Prune() does nothing.  Subclasses are
  // strongly encouraged to override the default implementation.  A future
  // release of leveldb may change Prune() to a pure abstract method.
  virtual void Prune() {}

  // Return an estimate of the combined charges of all elements stored in the
  // cache.
  virtual size_t TotalCharge() const = 0;
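  //
  // Example (illustrative sketch; "kSoftLimit" is a hypothetical threshold):
  //
  //   if (cache->TotalCharge() > kSoftLimit) {
  //     cache->Prune();  // drops entries with no outstanding handles
  //   }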

 private:
  void LRU_Remove(Handle* e);
  void LRU_Append(Handle* e);
  void Unref(Handle* e);

  struct Rep;
  Rep* rep_;
};

}  // namespace table

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_LIB_IO_CACHE_H_