• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #ifdef ENABLE_SHADER_CACHE
25 
26 #include <ctype.h>
27 #include <ftw.h>
28 #include <string.h>
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <sys/file.h>
32 #include <sys/types.h>
33 #include <sys/stat.h>
34 #include <sys/mman.h>
35 #include <fcntl.h>
36 #include <errno.h>
37 #include <dirent.h>
38 #include <inttypes.h>
39 
40 #include "util/crc32.h"
41 #include "util/debug.h"
42 #include "util/rand_xor.h"
43 #include "util/u_atomic.h"
44 #include "util/mesa-sha1.h"
45 #include "util/ralloc.h"
46 #include "util/compiler.h"
47 
48 #include "disk_cache.h"
49 #include "disk_cache_os.h"
50 
51 /* The cache version should be bumped whenever a change is made to the
52  * structure of cache entries or the index. This will give any 3rd party
53  * applications reading the cache entries a chance to adjust to the changes.
54  *
55  * - The cache version is checked internally when reading a cache entry. If we
56  *   ever have a mismatch we are in big trouble as this means we had a cache
57  *   collision. In case of such an event please check the skies for giant
58  *   asteroids and that the entire Mesa team hasn't been eaten by wolves.
59  *
60  * - There is no strict requirement that cache versions be backwards
61  *   compatible but effort should be taken to limit disruption where possible.
62  */
63 #define CACHE_VERSION 1
64 
/* Append _src_size bytes from _src at the byte cursor _dst and advance the
 * cursor past the copied data.
 *
 * NOTE(review): the trailing ';' after the do/while(0) is load-bearing here —
 * the invocation sites in disk_cache_create() omit their own semicolons and
 * rely on this one.  Do not "fix" the macro without also terminating every
 * call site.
 */
#define DRV_KEY_CPY(_dst, _src, _src_size) \
do {                                       \
   memcpy(_dst, _src, _src_size);          \
   _dst += _src_size;                      \
} while (0);
70 
/* Create and initialize a per-driver disk cache instance.
 *
 * gpu_name, driver_id, the build's pointer size, and driver_flags are
 * concatenated into cache->driver_keys_blob, which disk_cache_compute_key()
 * mixes into every cache key so entries from a different driver build or GPU
 * never collide.
 *
 * Returns NULL when the cache is disabled or on allocation failure.  If the
 * on-disk path cannot be set up (or on Android, where disk storage is
 * deliberately skipped), a cache object is still returned with
 * path_init_failed set so the blob-callback path keeps working.
 */
struct disk_cache *
disk_cache_create(const char *gpu_name, const char *driver_id,
                  uint64_t driver_flags)
{
   void *local;
   struct disk_cache *cache = NULL;
   char *max_size_str;
   uint64_t max_size;

   uint8_t cache_version = CACHE_VERSION;
   size_t cv_size = sizeof(cache_version);

   if (!disk_cache_enabled())
      return NULL;

   /* A ralloc context for transient data during this invocation. */
   local = ralloc_context(NULL);
   if (local == NULL)
      goto fail;

   cache = rzalloc(NULL, struct disk_cache);
   if (cache == NULL)
      goto fail;

   /* Assume failure. */
   cache->path_init_failed = true;

#ifdef ANDROID
   /* Android needs the "disk cache" to be enabled for
    * EGL_ANDROID_blob_cache's callbacks to be called, but it doesn't actually
    * want any storing to disk to happen inside of the driver.
    */
   goto path_fail;
#endif

   char *path = disk_cache_generate_cache_dir(local);
   if (!path)
      goto path_fail;

   if (!disk_cache_mmap_cache_index(local, cache, path))
      goto path_fail;

   max_size = 0;

   /* Parse MESA_GLSL_CACHE_MAX_SIZE: a number with an optional K/M/G suffix.
    * No suffix — and any unrecognized suffix — is treated as gigabytes.
    */
   max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE");
   if (max_size_str) {
      char *end;
      max_size = strtoul(max_size_str, &end, 10);
      if (end == max_size_str) {
         /* No digits at all — ignore the variable. */
         max_size = 0;
      } else {
         switch (*end) {
         case 'K':
         case 'k':
            max_size *= 1024;
            break;
         case 'M':
         case 'm':
            max_size *= 1024*1024;
            break;
         case '\0':
         case 'G':
         case 'g':
         default:
            max_size *= 1024*1024*1024;
            break;
         }
      }
   }

   /* Default to 1GB for maximum cache size. */
   if (max_size == 0) {
      max_size = 1024*1024*1024;
   }

   cache->max_size = max_size;

   /* 4 threads were chosen below because just about all modern CPUs currently
    * available that run Mesa have *at least* 4 cores. For these CPUs allowing
    * more threads can result in the queue being processed faster, thus
    * avoiding excessive memory use due to a backlog of cache entries building
    * up in the queue. Since we set the UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY
    * flag this should have little negative impact on low core systems.
    *
    * The queue will resize automatically when it's full, so adding new jobs
    * doesn't stall.
    */
   util_queue_init(&cache->cache_queue, "disk$", 32, 4,
                   UTIL_QUEUE_INIT_RESIZE_IF_FULL |
                   UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
                   UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY);

   cache->path_init_failed = false;

 path_fail:

   /* Build the driver keys blob even when path init failed — the blob
    * callbacks still need driver-unique keys.
    */
   cache->driver_keys_blob_size = cv_size;

   /* Create driver id keys */
   size_t id_size = strlen(driver_id) + 1;
   size_t gpu_name_size = strlen(gpu_name) + 1;
   cache->driver_keys_blob_size += id_size;
   cache->driver_keys_blob_size += gpu_name_size;

   /* We sometimes store entire structs that contain pointers in the cache,
    * use pointer size as a key to avoid hard to debug issues.
    */
   uint8_t ptr_size = sizeof(void *);
   size_t ptr_size_size = sizeof(ptr_size);
   cache->driver_keys_blob_size += ptr_size_size;

   size_t driver_flags_size = sizeof(driver_flags);
   cache->driver_keys_blob_size += driver_flags_size;

   cache->driver_keys_blob =
      ralloc_size(cache, cache->driver_keys_blob_size);
   if (!cache->driver_keys_blob)
      goto fail;

   /* No trailing ';' on these lines — DRV_KEY_CPY supplies its own. */
   uint8_t *drv_key_blob = cache->driver_keys_blob;
   DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size)
   DRV_KEY_CPY(drv_key_blob, driver_id, id_size)
   DRV_KEY_CPY(drv_key_blob, gpu_name, gpu_name_size)
   DRV_KEY_CPY(drv_key_blob, &ptr_size, ptr_size_size)
   DRV_KEY_CPY(drv_key_blob, &driver_flags, driver_flags_size)

   /* Seed our rand function */
   s_rand_xorshift128plus(cache->seed_xorshift128plus, true);

   ralloc_free(local);

   return cache;

 fail:
   if (cache)
      ralloc_free(cache);
   ralloc_free(local);

   return NULL;
}
211 
212 void
disk_cache_destroy(struct disk_cache * cache)213 disk_cache_destroy(struct disk_cache *cache)
214 {
215    if (cache && !cache->path_init_failed) {
216       util_queue_finish(&cache->cache_queue);
217       util_queue_destroy(&cache->cache_queue);
218       disk_cache_destroy_mmap(cache);
219    }
220 
221    ralloc_free(cache);
222 }
223 
/* Block until every queued cache-write job has completed.
 *
 * NOTE(review): assumes the queue was initialized, i.e. path_init_failed is
 * false — callers appear responsible for checking that; confirm against
 * call sites.
 */
void
disk_cache_wait_for_idle(struct disk_cache *cache)
{
   util_queue_finish(&cache->cache_queue);
}
229 
230 void
disk_cache_remove(struct disk_cache * cache,const cache_key key)231 disk_cache_remove(struct disk_cache *cache, const cache_key key)
232 {
233    char *filename = disk_cache_get_cache_filename(cache, key);
234    if (filename == NULL) {
235       return;
236    }
237 
238    disk_cache_evict_item(cache, filename);
239 }
240 
241 static struct disk_cache_put_job *
create_put_job(struct disk_cache * cache,const cache_key key,const void * data,size_t size,struct cache_item_metadata * cache_item_metadata)242 create_put_job(struct disk_cache *cache, const cache_key key,
243                const void *data, size_t size,
244                struct cache_item_metadata *cache_item_metadata)
245 {
246    struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *)
247       malloc(sizeof(struct disk_cache_put_job) + size);
248 
249    if (dc_job) {
250       dc_job->cache = cache;
251       memcpy(dc_job->key, key, sizeof(cache_key));
252       dc_job->data = dc_job + 1;
253       memcpy(dc_job->data, data, size);
254       dc_job->size = size;
255 
256       /* Copy the cache item metadata */
257       if (cache_item_metadata) {
258          dc_job->cache_item_metadata.type = cache_item_metadata->type;
259          if (cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
260             dc_job->cache_item_metadata.num_keys =
261                cache_item_metadata->num_keys;
262             dc_job->cache_item_metadata.keys = (cache_key *)
263                malloc(cache_item_metadata->num_keys * sizeof(cache_key));
264 
265             if (!dc_job->cache_item_metadata.keys)
266                goto fail;
267 
268             memcpy(dc_job->cache_item_metadata.keys,
269                    cache_item_metadata->keys,
270                    sizeof(cache_key) * cache_item_metadata->num_keys);
271          }
272       } else {
273          dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
274          dc_job->cache_item_metadata.keys = NULL;
275       }
276    }
277 
278    return dc_job;
279 
280 fail:
281    free(dc_job);
282 
283    return NULL;
284 }
285 
286 static void
destroy_put_job(void * job,int thread_index)287 destroy_put_job(void *job, int thread_index)
288 {
289    if (job) {
290       struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
291       free(dc_job->cache_item_metadata.keys);
292 
293       free(job);
294    }
295 }
296 
297 static void
cache_put(void * job,int thread_index)298 cache_put(void *job, int thread_index)
299 {
300    assert(job);
301 
302    unsigned i = 0;
303    char *filename = NULL;
304    struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
305 
306    filename = disk_cache_get_cache_filename(dc_job->cache, dc_job->key);
307    if (filename == NULL)
308       goto done;
309 
310    /* If the cache is too large, evict something else first. */
311    while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
312           i < 8) {
313       disk_cache_evict_lru_item(dc_job->cache);
314       i++;
315    }
316 
317    /* Create CRC of the data. We will read this when restoring the cache and
318     * use it to check for corruption.
319     */
320    struct cache_entry_file_data cf_data;
321    cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
322    cf_data.uncompressed_size = dc_job->size;
323 
324    disk_cache_write_item_to_disk(dc_job, &cf_data, filename);
325 
326 done:
327    free(filename);
328 }
329 
330 void
disk_cache_put(struct disk_cache * cache,const cache_key key,const void * data,size_t size,struct cache_item_metadata * cache_item_metadata)331 disk_cache_put(struct disk_cache *cache, const cache_key key,
332                const void *data, size_t size,
333                struct cache_item_metadata *cache_item_metadata)
334 {
335    if (cache->blob_put_cb) {
336       cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
337       return;
338    }
339 
340    if (cache->path_init_failed)
341       return;
342 
343    struct disk_cache_put_job *dc_job =
344       create_put_job(cache, key, data, size, cache_item_metadata);
345 
346    if (dc_job) {
347       util_queue_fence_init(&dc_job->fence);
348       util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
349                          cache_put, destroy_put_job, dc_job->size);
350    }
351 }
352 
353 void *
disk_cache_get(struct disk_cache * cache,const cache_key key,size_t * size)354 disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
355 {
356    if (size)
357       *size = 0;
358 
359    if (cache->blob_get_cb) {
360       /* This is what Android EGL defines as the maxValueSize in egl_cache_t
361        * class implementation.
362        */
363       const signed long max_blob_size = 64 * 1024;
364       void *blob = malloc(max_blob_size);
365       if (!blob)
366          return NULL;
367 
368       signed long bytes =
369          cache->blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size);
370 
371       if (!bytes) {
372          free(blob);
373          return NULL;
374       }
375 
376       if (size)
377          *size = bytes;
378       return blob;
379    }
380 
381    char *filename = disk_cache_get_cache_filename(cache, key);
382    if (filename == NULL)
383       return NULL;
384 
385    return disk_cache_load_item(cache, filename, size);
386 }
387 
388 void
disk_cache_put_key(struct disk_cache * cache,const cache_key key)389 disk_cache_put_key(struct disk_cache *cache, const cache_key key)
390 {
391    const uint32_t *key_chunk = (const uint32_t *) key;
392    int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
393    unsigned char *entry;
394 
395    if (cache->blob_put_cb) {
396       cache->blob_put_cb(key, CACHE_KEY_SIZE, key_chunk, sizeof(uint32_t));
397       return;
398    }
399 
400    if (cache->path_init_failed)
401       return;
402 
403    entry = &cache->stored_keys[i * CACHE_KEY_SIZE];
404 
405    memcpy(entry, key, CACHE_KEY_SIZE);
406 }
407 
408 /* This function lets us test whether a given key was previously
409  * stored in the cache with disk_cache_put_key(). The implement is
410  * efficient by not using syscalls or hitting the disk. It's not
411  * race-free, but the races are benign. If we race with someone else
412  * calling disk_cache_put_key, then that's just an extra cache miss and an
413  * extra recompile.
414  */
415 bool
disk_cache_has_key(struct disk_cache * cache,const cache_key key)416 disk_cache_has_key(struct disk_cache *cache, const cache_key key)
417 {
418    const uint32_t *key_chunk = (const uint32_t *) key;
419    int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
420    unsigned char *entry;
421 
422    if (cache->blob_get_cb) {
423       uint32_t blob;
424       return cache->blob_get_cb(key, CACHE_KEY_SIZE, &blob, sizeof(uint32_t));
425    }
426 
427    if (cache->path_init_failed)
428       return false;
429 
430    entry = &cache->stored_keys[i * CACHE_KEY_SIZE];
431 
432    return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
433 }
434 
/* Compute the cache key for a blob of driver data.
 *
 * The key is the SHA-1 of the cache's driver_keys_blob (cache version,
 * driver id, GPU name, pointer size, driver flags — built in
 * disk_cache_create()) followed by the data itself, so identical data from
 * different driver builds hashes to different keys.
 */
void
disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
                       cache_key key)
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   _mesa_sha1_update(&ctx, cache->driver_keys_blob,
                     cache->driver_keys_blob_size);
   _mesa_sha1_update(&ctx, data, size);
   _mesa_sha1_final(&ctx, key);
}
447 
/* Install external blob put/get callbacks (e.g. EGL_ANDROID_blob_cache).
 *
 * When set, disk_cache_put()/get()/put_key()/has_key() delegate to these
 * callbacks instead of touching the disk.  Pass NULL to restore the default
 * on-disk behavior.
 */
void
disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
                         disk_cache_get_cb get)
{
   cache->blob_put_cb = put;
   cache->blob_get_cb = get;
}
455 
456 #endif /* ENABLE_SHADER_CACHE */
457