/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */

//#define LOG_NDEBUG 0

#include "BlobCache.h"

#include <errno.h>
#include <inttypes.h>

#if defined(__ANDROID__)
#include <cutils/properties.h>
#else
#include <string.h>
#include <algorithm>
static const char property_value[] = "[HOST]";
#define PROPERTY_VALUE_MAX (sizeof(property_value) - 1)
static int property_get(const char *key, char *value, const char *default_value) {
    if (!strcmp(key, "ro.build.id")) {
        memcpy(value, property_value, PROPERTY_VALUE_MAX);
        return PROPERTY_VALUE_MAX;
    }
    if (default_value) {
        // Copy no more than the destination capacity, including the terminating NUL.
        const size_t len = std::min(strlen(default_value) + 1, size_t(PROPERTY_VALUE_MAX));
        memcpy(value, default_value, len);
    }
    return 0;
}
#endif

#include <log/log.h>

#include <algorithm>
#include <chrono>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

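// Construct an empty cache. The size limits bound individual keys, individual
// values, and the combined size of all entries; the policy selects how victims
// are chosen (RANDOM or LRU) and how far the cache is trimmed when it fills up.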
BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize, Policy policy):
        mMaxKeySize(maxKeySize),
        mMaxValueSize(maxValueSize),
        mMaxTotalSize(maxTotalSize),
        mPolicySelect(policy.first),
        mPolicyCapacity(policy.second),
        mTotalSize(0),
        mAccessCount(0) {
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    srand(now);
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    ALOGV("initializing random seed using %lld", (long long)now);
}

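// Insert or update a key/value pair. Oversized keys, values, or pairs are
// silently dropped; if the new entry would push the cache past mMaxTotalSize,
// existing entries are evicted first (see clean()) and the insert is retried.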
void BlobCache::set(const void* key, size_t keySize, const void* value,
        size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)",
                keySize, mMaxKeySize);
        return;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)",
                valueSize, mMaxValueSize);
        return;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
                "large: %zu (limit: %zu)", keySize + valueSize, mMaxTotalSize);
        return;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return;
    }

    std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
    CacheEntry dummyEntry(dummyKey, NULL, 0);

    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
        if (index == mCacheEntries.end() || dummyEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newEntrySize = keySize + valueSize;
            size_t newTotalSize = mTotalSize + newEntrySize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    if (!clean(newEntrySize, NoEntry)) {
                        // We have some kind of logic error -- perhaps
                        // an inconsistency between isCleanable() and
                        // findDownTo().
                        ALOGE("set: not caching new key/value pair because "
                              "cleaning failed");
                        break;
                    }
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                            "total cache size limit would be exceeded: %zu "
                            "(limit: %zu)",
                            keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob, ++mAccessCount));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value",
                    keySize, valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    if (!clean(index->getKey()->getSize() + valueSize,
                               index - mCacheEntries.begin())) {
                        // We have some kind of logic error -- perhaps
                        // an inconsistency between isCleanable() and
                        // findDownTo().
                        ALOGE("set: not caching new value because "
                              "cleaning failed");
                        break;
                    }
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                            "size limit would be exceeded: %zu (limit: %zu)",
                            keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            index->setValue(valueBlob);
            index->setRecency(++mAccessCount);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                    "value", keySize, valueSize);
        }
        break;
    }
}

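// Look up a key and copy its value into the caller-provided buffer. Returns
// the size of the cached value (0 if the key is absent); if the value does not
// fit in valueSize bytes, nothing is copied and the caller may retry with a
// larger buffer.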
size_t BlobCache::get(const void* key, size_t keySize, void* value,
        size_t valueSize) {
    void *dummy;
    return get(key, keySize, &dummy,
               [value, valueSize](size_t allocSize) {
                   return (allocSize <= valueSize ? value : nullptr);
               });
}

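// Look up a key and copy its value into a buffer obtained from the alloc
// callback. Returns the size of the cached value, or 0 if the key is absent;
// *value is set to the allocated buffer, or to nullptr if alloc declined.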
size_t BlobCache::get(const void* key, size_t keySize, void** value,
        std::function<void*(size_t)> alloc) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)",
                keySize, mMaxKeySize);
        *value = nullptr;
        return 0;
    }
    std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
    CacheEntry dummyEntry(dummyKey, NULL, 0);
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
    if (index == mCacheEntries.end() || dummyEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        *value = nullptr;
        return 0;
    }

    // The key was found. Return the value if we can allocate a buffer.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    void *buf = alloc(valueBlobSize);
    if (buf != nullptr) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(buf, valueBlob->getData(), valueBlobSize);
        *value = buf;
        index->setRecency(++mAccessCount);
    } else {
        ALOGV("get: cannot allocate caller's buffer: needs %zu", valueBlobSize);
        *value = nullptr;
    }
    return valueBlobSize;
}

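// Round size up to the next multiple of 4 bytes.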
static inline size_t align4(size_t size) {
    return (size + 3) & ~3;
}

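// Return the number of bytes needed to serialize the cache: one Header with
// room for the build id, plus one 4-byte-aligned EntryHeader, key, and value
// per entry.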
size_t BlobCache::getFlattenedSize() const {
    size_t size = align4(sizeof(Header) + PROPERTY_VALUE_MAX);
    for (const CacheEntry& e :  mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

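// Serialize the cache into the caller's buffer (sized via getFlattenedSize()):
// a Header carrying the magic number, versions, and build id, followed by each
// entry as an EntryHeader plus key and value data, 4-byte aligned.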
int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return 0;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    char buildId[PROPERTY_VALUE_MAX];
    header->mBuildIdLength = property_get("ro.build.id", buildId, "");
    memcpy(header->mBuildId, buildId, header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e :  mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }

        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // We have padding bytes. Those will get written to storage, and contribute to the CRC,
            // so make sure we zero them to have reproducible results.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}

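// Repopulate the cache from a buffer previously produced by flatten(). The
// cache is cleared first; a version or build-id mismatch leaves it empty and
// returns 0, while a truncated or malformed buffer returns -EINVAL.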
int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    char buildId[PROPERTY_VALUE_MAX];
    int len = property_get("ro.build.id", buildId, "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
            header->mDeviceVersion != blobCacheDeviceVersion ||
            len != header->mBuildIdLength ||
            strncmp(buildId, header->mBuildId, len)) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry header");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(
                &byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}

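// Return a pseudo-random number from the per-cache generator seeded in the
// constructor; used by the RANDOM eviction policy.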
long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

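// Choose the index of the entry to evict next: a random entry under
// Select::RANDOM, or the least recently used entry under Select::LRU.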
size_t BlobCache::findVictim() {
    switch (mPolicySelect) {
        case Select::RANDOM:
            return size_t(blob_random() % (mCacheEntries.size()));
        case Select::LRU:
            return std::min_element(mCacheEntries.begin(), mCacheEntries.end(),
                                    [](const CacheEntry &a, const CacheEntry &b) {
                                        return a.getRecency() < b.getRecency();
                                    }) - mCacheEntries.begin();
        default:
            ALOGE("findVictim: unknown mPolicySelect: %d", mPolicySelect);
            return 0;
    }
}

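// Compute the total size the cache should be trimmed to before inserting
// newEntrySize bytes (replacing the entry at onBehalfOf, if any): half the
// maximum (HALVE), just enough to fit the new entry (FIT), or whichever of
// the two is smaller (FIT_HALVE).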
size_t BlobCache::findDownTo(size_t newEntrySize, size_t onBehalfOf) {
    auto oldEntrySize = [this, onBehalfOf]() -> size_t {
        if (onBehalfOf == NoEntry)
            return 0;
        const auto &entry = mCacheEntries[onBehalfOf];
        return entry.getKey()->getSize() + entry.getValue()->getSize();
    };
    switch (mPolicyCapacity) {
        case Capacity::HALVE:
            return mMaxTotalSize / 2;
        case Capacity::FIT:
            return mMaxTotalSize - (newEntrySize - oldEntrySize());
        case Capacity::FIT_HALVE:
            return std::min(mMaxTotalSize - (newEntrySize - oldEntrySize()), mMaxTotalSize / 2);
        default:
            ALOGE("findDownTo: unknown mPolicyCapacity: %d", mPolicyCapacity);
            return 0;
    }
}

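// Return true if the capacity policy trims only as much as needed to fit the
// incoming entry (FIT or FIT_HALVE), false for HALVE.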
bool BlobCache::isFit(Capacity capacity) {
    switch (capacity) {
        case Capacity::HALVE:
            return false;
        case Capacity::FIT:
        case Capacity::FIT_HALVE:
            return true;
        default:
            ALOGE("isFit: unknown capacity: %d", capacity);
            return false;
    }
}

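// Evict entries chosen by findVictim() until the total size drops to the
// target computed by findDownTo(). Returns true if at least one entry was
// removed.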
bool BlobCache::clean(size_t newEntrySize, size_t onBehalfOf) {
    // Remove selected cache entries until the total cache size does
    // not exceed downTo.
    const size_t downTo = findDownTo(newEntrySize, onBehalfOf);

    bool cleaned = false;
    while (mTotalSize > downTo) {
        const size_t i = findVictim();
        const CacheEntry& entry(mCacheEntries[i]);
        const size_t entrySize = entry.getKey()->getSize() + entry.getValue()->getSize();
        mTotalSize -= entrySize;
        mCacheEntries.erase(mCacheEntries.begin() + i);
        cleaned = true;
    }
    return cleaned;
}

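// Return true if cleaning could free space under the current capacity policy:
// for HALVE, only when the cache is above half of its maximum size; for FIT
// and FIT_HALVE, whenever the cache is non-empty.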
bool BlobCache::isCleanable() const {
    switch (mPolicyCapacity) {
        case Capacity::HALVE:
            return mTotalSize > mMaxTotalSize / 2;
        default:
            ALOGE("isCleanable: unknown mPolicyCapacity: %d", mPolicyCapacity);
            [[fallthrough]];
        case Capacity::FIT:
        case Capacity::FIT_HALVE:
            return mTotalSize > 0;
    }
}

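// A Blob wraps a key or value buffer. When copyData is true the constructor
// takes a private, owned copy of the data; otherwise it merely references the
// caller's memory (used for temporary lookup keys).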
BlobCache::Blob::Blob(const void* data, size_t size, bool copyData) :
        mData(copyData ? malloc(size) : data),
        mSize(size),
        mOwnsData(copyData) {
    if (data != NULL && copyData) {
        memcpy(const_cast<void*>(mData), data, size);
    }
}

BlobCache::Blob::~Blob() {
    if (mOwnsData) {
        free(const_cast<void*>(mData));
    }
}

bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry(): mRecency(0) {
}

BlobCache::CacheEntry::CacheEntry(
        const std::shared_ptr<Blob>& key, const std::shared_ptr<Blob>& value, uint32_t recency):
        mKey(key),
        mValue(value),
        mRecency(recency) {
}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce):
        mKey(ce.mKey),
        mValue(ce.mValue),
        mRecency(ce.mRecency) {
}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    mRecency = rhs.mRecency;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

uint32_t BlobCache::CacheEntry::getRecency() const {
    return mRecency;
}

void BlobCache::CacheEntry::setRecency(uint32_t recency) {
    mRecency = recency;
}

} // namespace android