1 /*
2 ** Copyright 2011, The Android Open Source Project
3 **
4 ** Licensed under the Apache License, Version 2.0 (the "License");
5 ** you may not use this file except in compliance with the License.
6 ** You may obtain a copy of the License at
7 **
8 ** http://www.apache.org/licenses/LICENSE-2.0
9 **
10 ** Unless required by applicable law or agreed to in writing, software
11 ** distributed under the License is distributed on an "AS IS" BASIS,
12 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 ** See the License for the specific language governing permissions and
14 ** limitations under the License.
15 */
16
17 #define LOG_TAG "BlobCache"
18 //#define LOG_NDEBUG 0
19
20 #include <inttypes.h>
21 #include <stdlib.h>
22 #include <string.h>
23
24 #include <utils/BlobCache.h>
25 #include <utils/Errors.h>
26 #include <utils/Log.h>
27
28 namespace android {
29
30 // BlobCache::Header::mMagicNumber value
31 static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';
32
33 // BlobCache::Header::mBlobCacheVersion value
34 static const uint32_t blobCacheVersion = 2;
35
36 // BlobCache::Header::mDeviceVersion value
37 static const uint32_t blobCacheDeviceVersion = 1;
38
BlobCache(size_t maxKeySize,size_t maxValueSize,size_t maxTotalSize)39 BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize):
40 mMaxKeySize(maxKeySize),
41 mMaxValueSize(maxValueSize),
42 mMaxTotalSize(maxTotalSize),
43 mTotalSize(0) {
44 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
45 #ifdef _WIN32
46 srand(now);
47 #else
48 mRandState[0] = (now >> 0) & 0xFFFF;
49 mRandState[1] = (now >> 16) & 0xFFFF;
50 mRandState[2] = (now >> 32) & 0xFFFF;
51 #endif
52 ALOGV("initializing random seed using %lld", (unsigned long long)now);
53 }
54
set(const void * key,size_t keySize,const void * value,size_t valueSize)55 void BlobCache::set(const void* key, size_t keySize, const void* value,
56 size_t valueSize) {
57 if (mMaxKeySize < keySize) {
58 ALOGV("set: not caching because the key is too large: %zu (limit: %zu)",
59 keySize, mMaxKeySize);
60 return;
61 }
62 if (mMaxValueSize < valueSize) {
63 ALOGV("set: not caching because the value is too large: %zu (limit: %zu)",
64 valueSize, mMaxValueSize);
65 return;
66 }
67 if (mMaxTotalSize < keySize + valueSize) {
68 ALOGV("set: not caching because the combined key/value size is too "
69 "large: %zu (limit: %zu)", keySize + valueSize, mMaxTotalSize);
70 return;
71 }
72 if (keySize == 0) {
73 ALOGW("set: not caching because keySize is 0");
74 return;
75 }
76 if (valueSize <= 0) {
77 ALOGW("set: not caching because valueSize is 0");
78 return;
79 }
80
81 sp<Blob> dummyKey(new Blob(key, keySize, false));
82 CacheEntry dummyEntry(dummyKey, NULL);
83
84 while (true) {
85 ssize_t index = mCacheEntries.indexOf(dummyEntry);
86 if (index < 0) {
87 // Create a new cache entry.
88 sp<Blob> keyBlob(new Blob(key, keySize, true));
89 sp<Blob> valueBlob(new Blob(value, valueSize, true));
90 size_t newTotalSize = mTotalSize + keySize + valueSize;
91 if (mMaxTotalSize < newTotalSize) {
92 if (isCleanable()) {
93 // Clean the cache and try again.
94 clean();
95 continue;
96 } else {
97 ALOGV("set: not caching new key/value pair because the "
98 "total cache size limit would be exceeded: %zu "
99 "(limit: %zu)",
100 keySize + valueSize, mMaxTotalSize);
101 break;
102 }
103 }
104 mCacheEntries.add(CacheEntry(keyBlob, valueBlob));
105 mTotalSize = newTotalSize;
106 ALOGV("set: created new cache entry with %zu byte key and %zu byte value",
107 keySize, valueSize);
108 } else {
109 // Update the existing cache entry.
110 sp<Blob> valueBlob(new Blob(value, valueSize, true));
111 sp<Blob> oldValueBlob(mCacheEntries[index].getValue());
112 size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
113 if (mMaxTotalSize < newTotalSize) {
114 if (isCleanable()) {
115 // Clean the cache and try again.
116 clean();
117 continue;
118 } else {
119 ALOGV("set: not caching new value because the total cache "
120 "size limit would be exceeded: %zu (limit: %zu)",
121 keySize + valueSize, mMaxTotalSize);
122 break;
123 }
124 }
125 mCacheEntries.editItemAt(index).setValue(valueBlob);
126 mTotalSize = newTotalSize;
127 ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
128 "value", keySize, valueSize);
129 }
130 break;
131 }
132 }
133
get(const void * key,size_t keySize,void * value,size_t valueSize)134 size_t BlobCache::get(const void* key, size_t keySize, void* value,
135 size_t valueSize) {
136 if (mMaxKeySize < keySize) {
137 ALOGV("get: not searching because the key is too large: %zu (limit %zu)",
138 keySize, mMaxKeySize);
139 return 0;
140 }
141 sp<Blob> dummyKey(new Blob(key, keySize, false));
142 CacheEntry dummyEntry(dummyKey, NULL);
143 ssize_t index = mCacheEntries.indexOf(dummyEntry);
144 if (index < 0) {
145 ALOGV("get: no cache entry found for key of size %zu", keySize);
146 return 0;
147 }
148
149 // The key was found. Return the value if the caller's buffer is large
150 // enough.
151 sp<Blob> valueBlob(mCacheEntries[index].getValue());
152 size_t valueBlobSize = valueBlob->getSize();
153 if (valueBlobSize <= valueSize) {
154 ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
155 memcpy(value, valueBlob->getData(), valueBlobSize);
156 } else {
157 ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)",
158 valueSize, valueBlobSize);
159 }
160 return valueBlobSize;
161 }
162
// Round |size| up to the next multiple of 4 (a no-op when already aligned).
static inline size_t align4(size_t size) {
    const size_t rem = size % 4;
    return rem == 0 ? size : size + (4 - rem);
}
166
getFlattenedSize() const167 size_t BlobCache::getFlattenedSize() const {
168 size_t size = align4(sizeof(Header));
169 for (size_t i = 0; i < mCacheEntries.size(); i++) {
170 const CacheEntry& e(mCacheEntries[i]);
171 sp<Blob> keyBlob = e.getKey();
172 sp<Blob> valueBlob = e.getValue();
173 size += align4(sizeof(EntryHeader) + keyBlob->getSize() +
174 valueBlob->getSize());
175 }
176 return size;
177 }
178
flatten(void * buffer,size_t size) const179 status_t BlobCache::flatten(void* buffer, size_t size) const {
180 // Write the cache header
181 if (size < sizeof(Header)) {
182 ALOGE("flatten: not enough room for cache header");
183 return BAD_VALUE;
184 }
185 Header* header = reinterpret_cast<Header*>(buffer);
186 header->mMagicNumber = blobCacheMagic;
187 header->mBlobCacheVersion = blobCacheVersion;
188 header->mDeviceVersion = blobCacheDeviceVersion;
189 header->mNumEntries = mCacheEntries.size();
190
191 // Write cache entries
192 uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
193 off_t byteOffset = align4(sizeof(Header));
194 for (size_t i = 0; i < mCacheEntries.size(); i++) {
195 const CacheEntry& e(mCacheEntries[i]);
196 sp<Blob> keyBlob = e.getKey();
197 sp<Blob> valueBlob = e.getValue();
198 size_t keySize = keyBlob->getSize();
199 size_t valueSize = valueBlob->getSize();
200
201 size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
202 size_t totalSize = align4(entrySize);
203 if (byteOffset + totalSize > size) {
204 ALOGE("flatten: not enough room for cache entries");
205 return BAD_VALUE;
206 }
207
208 EntryHeader* eheader = reinterpret_cast<EntryHeader*>(
209 &byteBuffer[byteOffset]);
210 eheader->mKeySize = keySize;
211 eheader->mValueSize = valueSize;
212
213 memcpy(eheader->mData, keyBlob->getData(), keySize);
214 memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);
215
216 if (totalSize > entrySize) {
217 // We have padding bytes. Those will get written to storage, and contribute to the CRC,
218 // so make sure we zero-them to have reproducible results.
219 memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
220 }
221
222 byteOffset += totalSize;
223 }
224
225 return OK;
226 }
227
unflatten(void const * buffer,size_t size)228 status_t BlobCache::unflatten(void const* buffer, size_t size) {
229 // All errors should result in the BlobCache being in an empty state.
230 mCacheEntries.clear();
231
232 // Read the cache header
233 if (size < sizeof(Header)) {
234 ALOGE("unflatten: not enough room for cache header");
235 return BAD_VALUE;
236 }
237 const Header* header = reinterpret_cast<const Header*>(buffer);
238 if (header->mMagicNumber != blobCacheMagic) {
239 ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
240 return BAD_VALUE;
241 }
242 if (header->mBlobCacheVersion != blobCacheVersion ||
243 header->mDeviceVersion != blobCacheDeviceVersion) {
244 // We treat version mismatches as an empty cache.
245 return OK;
246 }
247
248 // Read cache entries
249 const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
250 off_t byteOffset = align4(sizeof(Header));
251 size_t numEntries = header->mNumEntries;
252 for (size_t i = 0; i < numEntries; i++) {
253 if (byteOffset + sizeof(EntryHeader) > size) {
254 mCacheEntries.clear();
255 ALOGE("unflatten: not enough room for cache entry headers");
256 return BAD_VALUE;
257 }
258
259 const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(
260 &byteBuffer[byteOffset]);
261 size_t keySize = eheader->mKeySize;
262 size_t valueSize = eheader->mValueSize;
263 size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
264
265 size_t totalSize = align4(entrySize);
266 if (byteOffset + totalSize > size) {
267 mCacheEntries.clear();
268 ALOGE("unflatten: not enough room for cache entry headers");
269 return BAD_VALUE;
270 }
271
272 const uint8_t* data = eheader->mData;
273 set(data, keySize, data + keySize, valueSize);
274
275 byteOffset += totalSize;
276 }
277
278 return OK;
279 }
280
blob_random()281 long int BlobCache::blob_random() {
282 #ifdef _WIN32
283 return rand();
284 #else
285 return nrand48(mRandState);
286 #endif
287 }
288
clean()289 void BlobCache::clean() {
290 // Remove a random cache entry until the total cache size gets below half
291 // the maximum total cache size.
292 while (mTotalSize > mMaxTotalSize / 2) {
293 size_t i = size_t(blob_random() % (mCacheEntries.size()));
294 const CacheEntry& entry(mCacheEntries[i]);
295 mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
296 mCacheEntries.removeAt(i);
297 }
298 }
299
isCleanable() const300 bool BlobCache::isCleanable() const {
301 return mTotalSize > mMaxTotalSize / 2;
302 }
303
Blob(const void * data,size_t size,bool copyData)304 BlobCache::Blob::Blob(const void* data, size_t size, bool copyData):
305 mData(copyData ? malloc(size) : data),
306 mSize(size),
307 mOwnsData(copyData) {
308 if (data != NULL && copyData) {
309 memcpy(const_cast<void*>(mData), data, size);
310 }
311 }
312
~Blob()313 BlobCache::Blob::~Blob() {
314 if (mOwnsData) {
315 free(const_cast<void*>(mData));
316 }
317 }
318
operator <(const Blob & rhs) const319 bool BlobCache::Blob::operator<(const Blob& rhs) const {
320 if (mSize == rhs.mSize) {
321 return memcmp(mData, rhs.mData, mSize) < 0;
322 } else {
323 return mSize < rhs.mSize;
324 }
325 }
326
getData() const327 const void* BlobCache::Blob::getData() const {
328 return mData;
329 }
330
getSize() const331 size_t BlobCache::Blob::getSize() const {
332 return mSize;
333 }
334
CacheEntry()335 BlobCache::CacheEntry::CacheEntry() {
336 }
337
CacheEntry(const sp<Blob> & key,const sp<Blob> & value)338 BlobCache::CacheEntry::CacheEntry(const sp<Blob>& key, const sp<Blob>& value):
339 mKey(key),
340 mValue(value) {
341 }
342
CacheEntry(const CacheEntry & ce)343 BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce):
344 mKey(ce.mKey),
345 mValue(ce.mValue) {
346 }
347
operator <(const CacheEntry & rhs) const348 bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
349 return *mKey < *rhs.mKey;
350 }
351
operator =(const CacheEntry & rhs)352 const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
353 mKey = rhs.mKey;
354 mValue = rhs.mValue;
355 return *this;
356 }
357
getKey() const358 sp<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
359 return mKey;
360 }
361
getValue() const362 sp<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
363 return mValue;
364 }
365
setValue(const sp<Blob> & value)366 void BlobCache::CacheEntry::setValue(const sp<Blob>& value) {
367 mValue = value;
368 }
369
370 } // namespace android
371