/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ShaderCache.h"
#include <GrDirectContext.h>
#include <gui/TraceUtils.h>
#include <log/log.h>
#include <openssl/sha.h>
#include <algorithm>
#include <array>
#include <thread>
#include "FileBlobCache.h"
#include "Properties.h"

namespace android {
namespace uirenderer {
namespace skiapipeline {

// Cache size limits.
static const size_t maxKeySize = 1024;
static const size_t maxValueSize = 2 * 1024 * 1024;
static const size_t maxTotalSize = 1024 * 1024;

ShaderCache::ShaderCache() {
    // The constructor is defined here because moving it to the header causes an
    // "incomplete FileBlobCache type" compilation error.
}

ShaderCache ShaderCache::sCache;

ShaderCache& ShaderCache::get() {
    return sCache;
}

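// Validates the on-disk cache against the given identity blob: hashes the identity with SHA-256,
// compares it to the hash previously stored under sIDKey, and clears the cache on any mismatch so
// stale program binaries are not reused. Called with mMutex held.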
bool ShaderCache::validateCache(const void* identity, ssize_t size) {
    if (nullptr == identity && size == 0) return true;

    if (nullptr == identity || size < 0) {
        if (CC_UNLIKELY(Properties::debugLevel & kDebugCaches)) {
            ALOGW("ShaderCache::validateCache invalid cache identity");
        }
        mBlobCache->clear();
        return false;
    }

    SHA256_CTX ctx;
    SHA256_Init(&ctx);

    SHA256_Update(&ctx, identity, size);
    mIDHash.resize(SHA256_DIGEST_LENGTH);
    SHA256_Final(mIDHash.data(), &ctx);

    std::array<uint8_t, SHA256_DIGEST_LENGTH> hash;
    auto key = sIDKey;
    auto loaded = mBlobCache->get(&key, sizeof(key), hash.data(), hash.size());

    if (loaded && std::equal(hash.begin(), hash.end(), mIDHash.begin())) return true;

    if (CC_UNLIKELY(Properties::debugLevel & kDebugCaches)) {
        ALOGW("ShaderCache::validateCache cache validation fails");
    }
    mBlobCache->clear();
    return false;
}

void ShaderCache::initShaderDiskCache(const void* identity, ssize_t size) {
    ATRACE_NAME("initShaderDiskCache");
    std::lock_guard<std::mutex> lock(mMutex);

    // Emulators can switch between different renderers either as part of config
    // or snapshot migration. Also, program binaries may not work well on some
    // desktop / laptop GPUs. Thus, disable the shader disk cache for emulator builds.
    if (!Properties::runningInEmulator && mFilename.length() > 0) {
        mBlobCache.reset(new FileBlobCache(maxKeySize, maxValueSize, maxTotalSize, mFilename));
        validateCache(identity, size);
        mInitialized = true;
    }
}

void ShaderCache::setFilename(const char* filename) {
    std::lock_guard<std::mutex> lock(mMutex);
    mFilename = filename;
}

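// Returns the in-memory BlobCache. The caller must hold mMutex, and the cache must already have
// been set up via initShaderDiskCache().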
BlobCache* ShaderCache::getBlobCacheLocked() {
    LOG_ALWAYS_FATAL_IF(!mInitialized, "ShaderCache has not been initialized");
    return mBlobCache.get();
}

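// Called by Skia to look up a previously cached shader (or pipeline cache blob) by key. Returns
// nullptr if the cache is uninitialized or the key is not present; otherwise returns an SkData
// that owns the malloc'd copy of the cached value.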
sk_sp<SkData> ShaderCache::load(const SkData& key) {
    ATRACE_NAME("ShaderCache::load");
    size_t keySize = key.size();
    std::lock_guard<std::mutex> lock(mMutex);
    if (!mInitialized) {
        return nullptr;
    }

    // mObservedBlobValueSize is kept reasonably large to avoid repeated reallocation.
    // Allocate the buffer with malloc; SkData takes ownership of that allocation and will call free.
    void* valueBuffer = malloc(mObservedBlobValueSize);
    if (!valueBuffer) {
        return nullptr;
    }
    BlobCache* bc = getBlobCacheLocked();
    size_t valueSize = bc->get(key.data(), keySize, valueBuffer, mObservedBlobValueSize);
    int maxTries = 3;
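    // If the cached value does not fit, grow the staging buffer (capped at maxValueSize) and
    // retry the lookup a bounded number of times.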
    while (valueSize > mObservedBlobValueSize && maxTries > 0) {
        mObservedBlobValueSize = std::min(valueSize, maxValueSize);
        void* newValueBuffer = realloc(valueBuffer, mObservedBlobValueSize);
        if (!newValueBuffer) {
            free(valueBuffer);
            return nullptr;
        }
        valueBuffer = newValueBuffer;
        valueSize = bc->get(key.data(), keySize, valueBuffer, mObservedBlobValueSize);
        maxTries--;
    }
    if (!valueSize) {
        free(valueBuffer);
        return nullptr;
    }
    if (valueSize > mObservedBlobValueSize) {
        ALOGE("ShaderCache::load value size is too big %d", (int)valueSize);
        free(valueBuffer);
        return nullptr;
    }
    mNumShadersCachedInRam++;
    ATRACE_FORMAT("HWUI RAM cache: %d shaders", mNumShadersCachedInRam);
    return SkData::MakeFromMalloc(valueBuffer, valueSize);
}

namespace {
// Helper for BlobCache::set to trace the result.
void set(BlobCache* cache, const void* key, size_t keySize, const void* value, size_t valueSize) {
    switch (cache->set(key, keySize, value, valueSize)) {
        case BlobCache::InsertResult::kInserted:
            // This is what we expect/hope. It means the cache is large enough.
            return;
        case BlobCache::InsertResult::kDidClean: {
            ATRACE_FORMAT("ShaderCache: evicted an entry to fit {key: %lu value %lu}!", keySize,
                          valueSize);
            return;
        }
        case BlobCache::InsertResult::kNotEnoughSpace: {
            ATRACE_FORMAT("ShaderCache: could not fit {key: %lu value %lu}!", keySize, valueSize);
            return;
        }
        case BlobCache::InsertResult::kInvalidValueSize:
        case BlobCache::InsertResult::kInvalidKeySize: {
            ATRACE_FORMAT("ShaderCache: invalid size {key: %lu value %lu}!", keySize, valueSize);
            return;
        }
        case BlobCache::InsertResult::kKeyTooBig:
        case BlobCache::InsertResult::kValueTooBig:
        case BlobCache::InsertResult::kCombinedTooBig: {
            ATRACE_FORMAT("ShaderCache: entry too big: {key: %lu value %lu}!", keySize, valueSize);
            return;
        }
    }
}
}  // namespace

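// Persists the identity hash (under sIDKey) and the whole BlobCache to the backing file.
// Caller must hold mMutex.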
void ShaderCache::saveToDiskLocked() {
    ATRACE_NAME("ShaderCache::saveToDiskLocked");
    if (mInitialized && mBlobCache) {
        if (mIDHash.size()) {
            auto key = sIDKey;
            set(mBlobCache.get(), &key, sizeof(key), mIDHash.data(), mIDHash.size());
        }
        mBlobCache->writeToFile();
    }
}

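// Called by Skia whenever a new shader binary (or, via onVkFrameFlushed, a Vulkan pipeline cache
// blob) is ready to be cached. Inserts the blob into the in-memory BlobCache and schedules a
// deferred write to disk.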
void ShaderCache::store(const SkData& key, const SkData& data, const SkString& /*description*/) {
    ATRACE_NAME("ShaderCache::store");
    std::lock_guard<std::mutex> lock(mMutex);
    mNumShadersCachedInRam++;
    ATRACE_FORMAT("HWUI RAM cache: %d shaders", mNumShadersCachedInRam);

    if (!mInitialized) {
        return;
    }

    size_t valueSize = data.size();
    size_t keySize = key.size();
    if (keySize == 0 || valueSize == 0 || valueSize >= maxValueSize) {
        ALOGW("ShaderCache::store: sizes %d %d not allowed", (int)keySize, (int)valueSize);
        return;
    }

    const void* value = data.data();

    BlobCache* bc = getBlobCacheLocked();
    if (mInStoreVkPipelineInProgress) {
        if (mOldPipelineCacheSize == -1) {
            // Record the initial pipeline cache size stored in the file.
            mOldPipelineCacheSize = bc->get(key.data(), keySize, nullptr, 0);
        }
        if (mNewPipelineCacheSize != -1 && mNewPipelineCacheSize == valueSize) {
            // The pipeline cache size has not changed. Stop trying to save.
            mTryToStorePipelineCache = false;
            return;
        }
        mNewPipelineCacheSize = valueSize;
    } else {
        mCacheDirty = true;
        // If new shaders were compiled, we probably have new pipeline state too.
        // Store the pipeline cache on the next flush.
        mNewPipelineCacheSize = -1;
        mTryToStorePipelineCache = true;
    }
    set(bc, key.data(), keySize, value, valueSize);

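    // Schedule at most one deferred write to disk; further store() calls that arrive within the
    // delay window are batched into the pending save.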
    if (!mSavePending && mDeferredSaveDelayMs > 0) {
        mSavePending = true;
        std::thread deferredSaveThread([this]() {
            usleep(mDeferredSaveDelayMs * 1000);  // milliseconds to microseconds
            std::lock_guard<std::mutex> lock(mMutex);
            // Write the file to disk if there is a new shader or the Vulkan pipeline cache size
            // has changed.
            if (mCacheDirty || mNewPipelineCacheSize != mOldPipelineCacheSize) {
                saveToDiskLocked();
                mOldPipelineCacheSize = mNewPipelineCacheSize;
                mTryToStorePipelineCache = false;
                mCacheDirty = false;
            }
            mSavePending = false;
        });
        deferredSaveThread.detach();
    }
}

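// Called after a Vulkan frame is flushed. If there is new pipeline state to persist, this asks
// Skia to store its pipeline cache data, which re-enters store() with mInStoreVkPipelineInProgress
// set so pipeline-cache writes can be told apart from ordinary shader writes.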
void ShaderCache::onVkFrameFlushed(GrDirectContext* context) {
    {
        std::lock_guard<std::mutex> lock(mMutex);

        if (!mInitialized || !mTryToStorePipelineCache) {
            return;
        }
    }
    mInStoreVkPipelineInProgress = true;
    context->storeVkPipelineCacheData();
    mInStoreVkPipelineInProgress = false;
}

} /* namespace skiapipeline */
} /* namespace uirenderer */
} /* namespace android */