• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  ** Copyright 2011, The Android Open Source Project
3  **
4  ** Licensed under the Apache License, Version 2.0 (the "License");
5  ** you may not use this file except in compliance with the License.
6  ** You may obtain a copy of the License at
7  **
8  **     http://www.apache.org/licenses/LICENSE-2.0
9  **
10  ** Unless required by applicable law or agreed to in writing, software
11  ** distributed under the License is distributed on an "AS IS" BASIS,
12  ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  ** See the License for the specific language governing permissions and
14  ** limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 
19 #include "BlobCache.h"
20 
21 #include <android-base/properties.h>
22 #include <errno.h>
23 #include <inttypes.h>
24 #include <log/log.h>
25 
26 #include <chrono>
27 
28 namespace android {
29 
// BlobCache::Header::mMagicNumber value: the ASCII bytes '_','B','b','$'
// packed into a uint32_t, used to recognize a flattened cache buffer.
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value. Bump when the flattened layout
// changes; unflatten() treats a mismatch as an empty cache rather than an error.
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value. Same mismatch-means-empty handling
// as the cache version above.
static const uint32_t blobCacheDeviceVersion = 1;
38 
BlobCache(size_t maxKeySize,size_t maxValueSize,size_t maxTotalSize)39 BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize)
40       : mMaxTotalSize(maxTotalSize),
41         mMaxKeySize(maxKeySize),
42         mMaxValueSize(maxValueSize),
43         mTotalSize(0) {
44     int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
45 #ifdef _WIN32
46     srand(now);
47 #else
48     mRandState[0] = (now >> 0) & 0xFFFF;
49     mRandState[1] = (now >> 16) & 0xFFFF;
50     mRandState[2] = (now >> 32) & 0xFFFF;
51 #endif
52     ALOGV("initializing random seed using %lld", (unsigned long long)now);
53 }
54 
// Inserts or updates the entry for `key`, copying both key and value into
// cache-owned storage. Oversized keys/values/pairs are rejected up front; if
// the cache is full, up to one clean() pass (random eviction down to half
// capacity) is attempted before giving up. Returns an InsertResult describing
// what happened.
BlobCache::InsertResult BlobCache::set(const void* key, size_t keySize, const void* value,
                                       size_t valueSize) {
    // Validate all size limits before allocating anything.
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)", keySize,
              mMaxKeySize);
        return InsertResult::kKeyTooBig;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)", valueSize,
              mMaxValueSize);
        return InsertResult::kValueTooBig;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
              "large: %zu (limit: %zu)",
              keySize + valueSize, mMaxTotalSize);
        return InsertResult::kCombinedTooBig;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return InsertResult::kInvalidKeySize;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return InsertResult::kInvalidValueSize;
    }

    // Probe with a non-owning Blob (copyData == false) so the lookup itself
    // does not copy the key; owning copies are made only when inserting.
    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);

    bool didClean = false;
    // This loop iterates at most twice: once normally, and once more after a
    // clean() frees space. Every path either `continue`s after clean() or
    // returns.
    while (true) {
        // mCacheEntries is kept sorted by key, so lower_bound finds either the
        // existing entry or the insertion point for a new one.
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
        if (index == mCacheEntries.end() || cacheEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newTotalSize = mTotalSize + keySize + valueSize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    didClean = true;
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                          "total cache size limit would be exceeded: %zu "
                          "(limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    return InsertResult::kNotEnoughSpace;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value", keySize,
                  valueSize);
        } else {
            // Update the existing cache entry. Only the value changes; the
            // key bytes already stored are reused.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    didClean = true;
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                          "size limit would be exceeded: %zu (limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    return InsertResult::kNotEnoughSpace;
                }
            }
            index->setValue(valueBlob);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                  "value",
                  keySize, valueSize);
        }
        return didClean ? InsertResult::kDidClean : InsertResult::kInserted;
    }
}
138 
get(const void * key,size_t keySize,void * value,size_t valueSize)139 size_t BlobCache::get(const void* key, size_t keySize, void* value, size_t valueSize) {
140     if (mMaxKeySize < keySize) {
141         ALOGV("get: not searching because the key is too large: %zu (limit %zu)", keySize,
142               mMaxKeySize);
143         return 0;
144     }
145     std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
146     CacheEntry cacheEntry(cacheKey, nullptr);
147     auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
148     if (index == mCacheEntries.end() || cacheEntry < *index) {
149         ALOGV("get: no cache entry found for key of size %zu", keySize);
150         return 0;
151     }
152 
153     // The key was found. Return the value if the caller's buffer is large
154     // enough.
155     std::shared_ptr<Blob> valueBlob(index->getValue());
156     size_t valueBlobSize = valueBlob->getSize();
157     if (valueBlobSize <= valueSize) {
158         ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
159         memcpy(value, valueBlob->getData(), valueBlobSize);
160     } else {
161         ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)", valueSize,
162               valueBlobSize);
163     }
164     return valueBlobSize;
165 }
166 
// Rounds `size` up to the next multiple of 4 (identity for multiples of 4).
// Arithmetic form of the classic (size + 3) & ~3 bit trick; wraps the same
// way for sizes within 3 of SIZE_MAX.
static inline size_t align4(size_t size) {
    const size_t remainder = size % 4;
    return remainder == 0 ? size : size + (4 - remainder);
}
170 
getFlattenedSize() const171 size_t BlobCache::getFlattenedSize() const {
172     auto buildId = base::GetProperty("ro.build.id", "");
173     size_t size = align4(sizeof(Header) + buildId.size());
174     for (const CacheEntry& e : mCacheEntries) {
175         std::shared_ptr<Blob> const& keyBlob = e.getKey();
176         std::shared_ptr<Blob> const& valueBlob = e.getValue();
177         size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
178     }
179     return size;
180 }
181 
flatten(void * buffer,size_t size) const182 int BlobCache::flatten(void* buffer, size_t size) const {
183     // Write the cache header
184     if (size < sizeof(Header)) {
185         ALOGE("flatten: not enough room for cache header");
186         return 0;
187     }
188     Header* header = reinterpret_cast<Header*>(buffer);
189     header->mMagicNumber = blobCacheMagic;
190     header->mBlobCacheVersion = blobCacheVersion;
191     header->mDeviceVersion = blobCacheDeviceVersion;
192     header->mNumEntries = mCacheEntries.size();
193     auto buildId = base::GetProperty("ro.build.id", "");
194     header->mBuildIdLength = buildId.size();
195     memcpy(header->mBuildId, buildId.c_str(), header->mBuildIdLength);
196 
197     // Write cache entries
198     uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
199     off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
200     for (const CacheEntry& e : mCacheEntries) {
201         std::shared_ptr<Blob> const& keyBlob = e.getKey();
202         std::shared_ptr<Blob> const& valueBlob = e.getValue();
203         size_t keySize = keyBlob->getSize();
204         size_t valueSize = valueBlob->getSize();
205 
206         size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
207         size_t totalSize = align4(entrySize);
208         if (byteOffset + totalSize > size) {
209             ALOGE("flatten: not enough room for cache entries");
210             return -EINVAL;
211         }
212 
213         EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
214         eheader->mKeySize = keySize;
215         eheader->mValueSize = valueSize;
216 
217         memcpy(eheader->mData, keyBlob->getData(), keySize);
218         memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);
219 
220         if (totalSize > entrySize) {
221             // We have padding bytes. Those will get written to storage, and contribute to the CRC,
222             // so make sure we zero-them to have reproducible results.
223             memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
224         }
225 
226         byteOffset += totalSize;
227     }
228 
229     return 0;
230 }
231 
// Rebuilds the cache from a buffer previously produced by flatten(). The
// cache is emptied first, so any error leaves it in a valid, empty state.
// Returns 0 on success or when the versions/build id don't match (stale
// caches are expected, e.g. after an update), -EINVAL on a malformed buffer.
int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    auto buildId = base::GetProperty("ro.build.id", "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
        header->mDeviceVersion != blobCacheDeviceVersion ||
        buildId.size() != header->mBuildIdLength ||
        strncmp(buildId.c_str(), header->mBuildId, buildId.size())) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(&byteBuffer[byteOffset]);
        // NOTE(review): keySize/valueSize come from the (possibly corrupt)
        // buffer; sizeof(EntryHeader) + keySize + valueSize could in principle
        // wrap, letting the bounds check below pass with a tiny totalSize.
        // set() re-validates both sizes against the cache limits before any
        // copy, but confirm callers only pass trusted or checksummed buffers.
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        // Re-insert through set() so size limits and sorted order are
        // enforced; the key bytes are immediately followed by the value bytes.
        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}
286 
// Returns the next pseudo-random number for the eviction policy in clean().
long int BlobCache::blob_random() {
#ifdef _WIN32
    // Windows has no nrand48(); use the global rand() stream, seeded in the
    // constructor via srand().
    return rand();
#else
    // Per-instance 48-bit state (seeded in the constructor), so each cache
    // gets an independent sequence.
    return nrand48(mRandState);
#endif
}
294 
clean()295 void BlobCache::clean() {
296     // Remove a random cache entry until the total cache size gets below half
297     // the maximum total cache size.
298     while (mTotalSize > mMaxTotalSize / 2) {
299         size_t i = size_t(blob_random() % (mCacheEntries.size()));
300         const CacheEntry& entry(mCacheEntries[i]);
301         mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
302         mCacheEntries.erase(mCacheEntries.begin() + i);
303     }
304 }
305 
isCleanable() const306 bool BlobCache::isCleanable() const {
307     return mTotalSize > mMaxTotalSize / 2;
308 }
309 
Blob(const void * data,size_t size,bool copyData)310 BlobCache::Blob::Blob(const void* data, size_t size, bool copyData)
311       : mData(copyData ? malloc(size) : data), mSize(size), mOwnsData(copyData) {
312     if (data != nullptr && copyData) {
313         memcpy(const_cast<void*>(mData), data, size);
314     }
315 }
316 
~Blob()317 BlobCache::Blob::~Blob() {
318     if (mOwnsData) {
319         free(const_cast<void*>(mData));
320     }
321 }
322 
operator <(const Blob & rhs) const323 bool BlobCache::Blob::operator<(const Blob& rhs) const {
324     if (mSize == rhs.mSize) {
325         return memcmp(mData, rhs.mData, mSize) < 0;
326     } else {
327         return mSize < rhs.mSize;
328     }
329 }
330 
// Read-only pointer to the blob's bytes (owned or borrowed, depending on how
// the Blob was constructed).
const void* BlobCache::Blob::getData() const {
    return mData;
}
334 
// Size of the blob's contents in bytes.
size_t BlobCache::Blob::getSize() const {
    return mSize;
}
338 
CacheEntry()339 BlobCache::CacheEntry::CacheEntry() {}
340 
// Constructs an entry sharing ownership of the given key and value blobs.
BlobCache::CacheEntry::CacheEntry(const std::shared_ptr<Blob>& key,
                                  const std::shared_ptr<Blob>& value)
      : mKey(key), mValue(value) {}
344 
CacheEntry(const CacheEntry & ce)345 BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce) : mKey(ce.mKey), mValue(ce.mValue) {}
346 
// Orders entries by their keys (size first, then contents — see
// Blob::operator<); this keeps mCacheEntries sorted for lower_bound lookups.
bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}
350 
// Copy-assignment: share the right-hand side's key/value blobs.
// NOTE(review): cannot be "= default"ed because the declared return type is
// const CacheEntry& rather than CacheEntry&. shared_ptr assignment is
// self-assignment safe, so no self-check is needed.
const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    return *this;
}
356 
// Returns the entry's key blob (copies the shared_ptr, sharing ownership).
std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}
360 
// Returns the entry's value blob (copies the shared_ptr, sharing ownership).
std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}
364 
// Replaces the entry's value blob; the previous value is released when its
// last shared_ptr reference goes away.
void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}
368 
369 } // namespace android
370