/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */


#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "util/compress.h"
#include "util/crc32.h"
#include "util/disk_cache.h"
#include "util/disk_cache_os.h"

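/* Fixed-size record stored in each serialized cache entry just before the
 * compressed payload: the CRC32 of the compressed data (used to detect
 * corruption) and the size of the data once uncompressed.
 */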
struct cache_entry_file_data {
   uint32_t crc32;
   uint32_t uncompressed_size;
};

#if DETECT_OS_WINDOWS

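/* Hash the last-write time of the module that contains 'ptr' into the SHA-1
 * context, so the resulting identifier changes whenever the driver binary is
 * rebuilt.
 */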
bool
disk_cache_get_function_identifier(void *ptr, struct mesa_sha1 *ctx)
{
   HMODULE mod = NULL;
   GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
                      (LPCWSTR)ptr,
                      &mod);
   if (!mod)
      return false;

   WCHAR filename[MAX_PATH];
   DWORD filename_length = GetModuleFileNameW(mod, filename, ARRAY_SIZE(filename));

   if (filename_length == 0 || filename_length == ARRAY_SIZE(filename))
      return false;

   HANDLE mod_as_file = CreateFileW(
         filename,
         GENERIC_READ,
         FILE_SHARE_READ,
         NULL,
         OPEN_EXISTING,
         FILE_ATTRIBUTE_NORMAL,
         NULL);
   if (mod_as_file == INVALID_HANDLE_VALUE)
      return false;

   FILETIME time;
   bool ret = GetFileTime(mod_as_file, NULL, NULL, &time);
   if (ret)
      _mesa_sha1_update(ctx, &time, sizeof(time));
   CloseHandle(mod_as_file);
   return ret;
}

#endif

#ifdef ENABLE_SHADER_CACHE

#if DETECT_OS_WINDOWS
/* TODO: implement disk cache support on windows */

#else

#include <dirent.h>
#include <errno.h>
#include <pwd.h>
#include <stdio.h>
#include <string.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "util/blob.h"
#include "util/crc32.h"
#include "util/debug.h"
#include "util/ralloc.h"
#include "util/rand_xor.h"

/* Create a directory named 'path' if it does not already exist.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *         -1 in all other cases.
 */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* If the path exists already, then our work is done if it's a
    * directory, but it's an error if it is not.
    */
   if (stat(path, &sb) == 0) {
      if (S_ISDIR(sb.st_mode)) {
         return 0;
      } else {
         fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
                 "---disabling.\n", path);
         return -1;
      }
   }

   int ret = mkdir(path, 0755);
   if (ret == 0 || (ret == -1 && errno == EEXIST))
      return 0;

   fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
           path, strerror(errno));

   return -1;
}

/* Concatenate an existing path and a new name to form a new path. If the new
 * path does not exist as a directory, create it then return the resulting
 * name of the new path (ralloc'ed off of 'ctx').
 *
 * Returns NULL on any error, such as:
 *
 *      <path> does not exist or is not a directory
 *      <path>/<name> exists but is not a directory
 *      <path>/<name> cannot be created as a directory
 */
static char *
concatenate_and_mkdir(void *ctx, const char *path, const char *name)
{
   char *new_path;
   struct stat sb;

   if (stat(path, &sb) != 0 || !S_ISDIR(sb.st_mode))
      return NULL;

   new_path = ralloc_asprintf(ctx, "%s/%s", path, name);

   if (mkdir_if_needed(new_path) == 0)
      return new_path;
   else
      return NULL;
}

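/* A candidate for eviction: the name of a file in a cache directory together
 * with its size on disk and its last access time.
 */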
struct lru_file {
   struct list_head node;
   char *lru_name;
   size_t lru_file_size;
   time_t lru_atime;
};

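/* Free a list previously returned by choose_lru_file_matching(), including
 * the duplicated file-name strings and the list head itself.
 */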
static void
free_lru_file_list(struct list_head *lru_file_list)
{
   struct lru_file *e, *next;
   LIST_FOR_EACH_ENTRY_SAFE(e, next, lru_file_list, node) {
      free(e->lru_name);
      free(e);
   }
   free(lru_file_list);
}

/* Given a directory path and predicate function, create a linked list of the
 * entries with the oldest access time in that directory for which the
 * predicate returns true.
 *
 * Returns: A malloc'ed linked list of the paths of the chosen files, (or
 * NULL on any error). The caller should free the linked list via
 * free_lru_file_list() when finished.
 */
static struct list_head *
choose_lru_file_matching(const char *dir_path,
                         bool (*predicate)(const char *dir_path,
                                           const struct stat *,
                                           const char *, const size_t))
{
   DIR *dir;
   struct dirent *dir_ent;

   dir = opendir(dir_path);
   if (dir == NULL)
      return NULL;

   /* First count the number of files in the directory */
   unsigned total_file_count = 0;
   while ((dir_ent = readdir(dir)) != NULL) {
      if (dir_ent->d_type == DT_REG) { /* If the entry is a regular file */
         total_file_count++;
      }
   }

   /* Reset to the start of the directory */
   rewinddir(dir);

   /* Collect 10% of the files in this directory for removal. Note: This
    * should work out to only be around 0.04% of total cache items.
    */
   unsigned lru_file_count = total_file_count > 10 ? total_file_count / 10 : 1;
   struct list_head *lru_file_list = malloc(sizeof(struct list_head));
   list_inithead(lru_file_list);

   unsigned processed_files = 0;
   while (1) {
      dir_ent = readdir(dir);
      if (dir_ent == NULL)
         break;

      struct stat sb;
      if (fstatat(dirfd(dir), dir_ent->d_name, &sb, 0) == 0) {
         struct lru_file *entry = NULL;
         if (!list_is_empty(lru_file_list))
            entry = list_first_entry(lru_file_list, struct lru_file, node);

         if (!entry || sb.st_atime < entry->lru_atime) {
            size_t len = strlen(dir_ent->d_name);
            if (!predicate(dir_path, &sb, dir_ent->d_name, len))
               continue;

            bool new_entry = false;
            if (processed_files < lru_file_count) {
               entry = calloc(1, sizeof(struct lru_file));
               new_entry = true;
            }
            processed_files++;

            char *tmp = realloc(entry->lru_name, len + 1);
            if (tmp) {
               /* Find the location to insert the new lru item. We want to
                * keep the list ordered from most recently used to least
                * recently used. This allows us to just evict the head item
                * from the list as we process the directory and find older
                * entries.
                */
               struct list_head *list_node = lru_file_list;
               struct lru_file *e;
               LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
                  if (sb.st_atime < e->lru_atime) {
                     list_node = &e->node;
                     break;
                  }
               }

               if (new_entry) {
                  list_addtail(&entry->node, list_node);
               } else {
                  if (list_node != lru_file_list) {
                     list_del(&entry->node);
                     list_addtail(&entry->node, list_node);
                  }
               }

               entry->lru_name = tmp;
               memcpy(entry->lru_name, dir_ent->d_name, len + 1);
               entry->lru_atime = sb.st_atime;
               entry->lru_file_size = sb.st_blocks * 512;
            }
         }
      }
   }

   if (list_is_empty(lru_file_list)) {
      closedir(dir);
      free(lru_file_list);
      return NULL;
   }

   /* Create the full path for the file list we found */
   struct lru_file *e;
   LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
      char *filename = e->lru_name;
      if (asprintf(&e->lru_name, "%s/%s", dir_path, filename) < 0)
         e->lru_name = NULL;

      free(filename);
   }

   closedir(dir);

   return lru_file_list;
}

/* Is the entry a regular file whose name does not end in ".tmp"?
 */
static bool
is_regular_non_tmp_file(const char *path, const struct stat *sb,
                        const char *d_name, const size_t len)
{
   if (!S_ISREG(sb->st_mode))
      return false;

   if (len >= 4 && strcmp(&d_name[len - 4], ".tmp") == 0)
      return false;

   return true;
}

/* Returns the total size of the deleted files, (or 0 on any error). */
static size_t
unlink_lru_file_from_directory(const char *path)
{
   struct list_head *lru_file_list =
      choose_lru_file_matching(path, is_regular_non_tmp_file);
   if (lru_file_list == NULL)
      return 0;

   assert(!list_is_empty(lru_file_list));

   size_t total_unlinked_size = 0;
   struct lru_file *e;
   LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
      if (unlink(e->lru_name) == 0)
         total_unlinked_size += e->lru_file_size;
   }
   free_lru_file_list(lru_file_list);

   return total_unlinked_size;
}

/* Is the entry a directory with a two-character name, (and not the
 * special name ".."). We also return false if the directory is empty.
 */
static bool
is_two_character_sub_directory(const char *path, const struct stat *sb,
                               const char *d_name, const size_t len)
{
   if (!S_ISDIR(sb->st_mode))
      return false;

   if (len != 2)
      return false;

   if (strcmp(d_name, "..") == 0)
      return false;

   char *subdir;
   if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
      return false;
   DIR *dir = opendir(subdir);
   free(subdir);

   if (dir == NULL)
      return false;

   unsigned subdir_entries = 0;
   struct dirent *d;
   while ((d = readdir(dir)) != NULL) {
      if (++subdir_entries > 2)
         break;
   }
   closedir(dir);

   /* If dir only contains '.' and '..' it must be empty */
   if (subdir_entries <= 2)
      return false;

   return true;
}

/* Create the directory that will be needed for the cache file for \key.
 *
 * Obviously, the implementation here must closely match
 * disk_cache_get_cache_filename() below.
 */
static void
make_cache_file_directory(struct disk_cache *cache, const cache_key key)
{
   char *dir;
   char buf[41];

   _mesa_sha1_format(buf, key);
   if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
      return;

   mkdir_if_needed(dir);
   free(dir);
}

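/* Read exactly 'count' bytes from 'fd', retrying on short reads.
 * Returns the number of bytes read, or -1 on error or premature EOF.
 */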
static ssize_t
read_all(int fd, void *buf, size_t count)
{
   char *in = buf;
   ssize_t read_ret;
   size_t done;

   for (done = 0; done < count; done += read_ret) {
      read_ret = read(fd, in + done, count - done);
      if (read_ret == -1 || read_ret == 0)
         return -1;
   }
   return done;
}

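/* Write exactly 'count' bytes to 'fd', retrying on short writes.
 * Returns the number of bytes written, or -1 on error.
 */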
static ssize_t
write_all(int fd, const void *buf, size_t count)
{
   const char *out = buf;
   ssize_t written;
   size_t done;

   for (done = 0; done < count; done += written) {
      written = write(fd, out + done, count - done);
      if (written == -1)
         return -1;
   }
   return done;
}

/* Evict least recently used cache item */
void
disk_cache_evict_lru_item(struct disk_cache *cache)
{
   char *dir_path;

   /* With a reasonably-sized, full cache, (and with keys generated
    * from a cryptographic hash), we can choose two random hex digits
    * and reasonably expect the directory to exist with a file in it.
    * This provides pseudo-LRU eviction without having to check every
    * cache file.
    */
   uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
   if (asprintf(&dir_path, "%s/%02" PRIx64, cache->path, rand64 & 0xff) < 0)
      return;

   size_t size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size) {
      p_atomic_add(cache->size, - (uint64_t)size);
      return;
   }

   /* In the case where the random choice of directory didn't find
    * something, we choose the least recently accessed from the
    * existing directories.
    *
    * Really, the only reason this code exists is to allow the unit
    * tests to work, (which use an artificially-small cache to be able
    * to force a single cached item to be evicted).
    */
   struct list_head *lru_file_list =
      choose_lru_file_matching(cache->path, is_two_character_sub_directory);
   if (lru_file_list == NULL)
      return;

   assert(!list_is_empty(lru_file_list));

   struct lru_file *lru_file_dir =
      list_first_entry(lru_file_list, struct lru_file, node);

   size = unlink_lru_file_from_directory(lru_file_dir->lru_name);

   free_lru_file_list(lru_file_list);

   if (size)
      p_atomic_add(cache->size, - (uint64_t)size);
}

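/* Remove a specific cache file and subtract its on-disk size from the
 * running cache-size total. Takes ownership of (and frees) 'filename'.
 */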
void
disk_cache_evict_item(struct disk_cache *cache, char *filename)
{
   struct stat sb;
   if (stat(filename, &sb) == -1) {
      free(filename);
      return;
   }

   unlink(filename);
   free(filename);

   if (sb.st_blocks)
      p_atomic_add(cache->size, - (uint64_t)sb.st_blocks * 512);
}

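/* Validate a raw cache-entry image: check the driver keys blob against the
 * current driver, skip any GLSL cache-item metadata, verify the CRC32 of the
 * compressed payload and inflate it.
 *
 * Returns a malloc'ed buffer holding the uncompressed data (its size is
 * stored in *size), or NULL on any mismatch or error.
 */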
static void *
parse_and_validate_cache_item(struct disk_cache *cache, void *cache_item,
                              size_t cache_item_size, size_t *size)
{
   uint8_t *uncompressed_data = NULL;

   struct blob_reader ci_blob_reader;
   blob_reader_init(&ci_blob_reader, cache_item, cache_item_size);

   size_t header_size = cache->driver_keys_blob_size;
   const void *keys_blob = blob_read_bytes(&ci_blob_reader, header_size);
   if (ci_blob_reader.overrun)
      goto fail;

   /* Check for extremely unlikely hash collisions */
   if (memcmp(cache->driver_keys_blob, keys_blob, header_size) != 0) {
      assert(!"Mesa cache keys mismatch!");
      goto fail;
   }

   uint32_t md_type = blob_read_uint32(&ci_blob_reader);
   if (ci_blob_reader.overrun)
      goto fail;

   if (md_type == CACHE_ITEM_TYPE_GLSL) {
      uint32_t num_keys = blob_read_uint32(&ci_blob_reader);
      if (ci_blob_reader.overrun)
         goto fail;

      /* The cache item metadata is currently just used for distributing
       * precompiled shaders; it is not used by Mesa itself, so just skip it
       * for now.
       * TODO: pass the metadata back to the caller and do some basic
       * validation.
       */
      const void UNUSED *metadata =
         blob_read_bytes(&ci_blob_reader, num_keys * sizeof(cache_key));
      if (ci_blob_reader.overrun)
         goto fail;
   }

   /* Load the CRC that was created when the file was written. */
   struct cache_entry_file_data *cf_data =
      (struct cache_entry_file_data *)
      blob_read_bytes(&ci_blob_reader, sizeof(struct cache_entry_file_data));
   if (ci_blob_reader.overrun)
      goto fail;

   size_t cache_data_size = ci_blob_reader.end - ci_blob_reader.current;
   const uint8_t *data = (uint8_t *) blob_read_bytes(&ci_blob_reader, cache_data_size);

   /* Check the data for corruption */
   if (cf_data->crc32 != util_hash_crc32(data, cache_data_size))
      goto fail;

   /* Uncompress the cache data */
   uncompressed_data = malloc(cf_data->uncompressed_size);
   if (!util_compress_inflate(data, cache_data_size, uncompressed_data,
                              cf_data->uncompressed_size))
      goto fail;

   if (size)
      *size = cf_data->uncompressed_size;

   return uncompressed_data;

 fail:
   if (uncompressed_data)
      free(uncompressed_data);

   return NULL;
}

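/* Read the cache file 'filename' into memory, then validate and decompress
 * it with parse_and_validate_cache_item(). Takes ownership of (and frees)
 * 'filename'. Returns the uncompressed data, or NULL on any error.
 */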
void *
disk_cache_load_item(struct disk_cache *cache, char *filename, size_t *size)
{
   uint8_t *data = NULL;

   int fd = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      goto fail;

   struct stat sb;
   if (fstat(fd, &sb) == -1)
      goto fail;

   data = malloc(sb.st_size);
   if (data == NULL)
      goto fail;

   /* Read entire file into memory */
   int ret = read_all(fd, data, sb.st_size);
   if (ret == -1)
      goto fail;

   uint8_t *uncompressed_data =
      parse_and_validate_cache_item(cache, data, sb.st_size, size);
   if (!uncompressed_data)
      goto fail;

   free(data);
   free(filename);
   close(fd);

   return uncompressed_data;

 fail:
   if (data)
      free(data);
   if (filename)
      free(filename);
   if (fd != -1)
      close(fd);

   return NULL;
}

/* Return a filename within the cache's directory corresponding to 'key'.
 *
 * Returns NULL if out of memory.
 */
char *
disk_cache_get_cache_filename(struct disk_cache *cache, const cache_key key)
{
   char buf[41];
   char *filename;

   if (cache->path_init_failed)
      return NULL;

   _mesa_sha1_format(buf, key);
   if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
                buf[1], buf + 2) == -1)
      return NULL;

   return filename;
}

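/* Serialize a cache entry into 'cache_blob': the driver keys blob, the
 * cache-item metadata, the cache_entry_file_data record (CRC32 and
 * uncompressed size) and finally the deflate-compressed payload.
 */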
static bool
create_cache_item_header_and_blob(struct disk_cache_put_job *dc_job,
                                  struct blob *cache_blob)
{

   /* Compress the cache item data */
   size_t max_buf = util_compress_max_compressed_len(dc_job->size);
   void *compressed_data = malloc(max_buf);
   if (compressed_data == NULL)
      return false;

   size_t compressed_size =
      util_compress_deflate(dc_job->data, dc_job->size,
                            compressed_data, max_buf);
   if (compressed_size == 0)
      goto fail;

   /* Copy the driver_keys_blob; this can be used to find information about
    * the Mesa version that produced the entry or to deal with hash
    * collisions, should that ever become a real problem.
    */
   if (!blob_write_bytes(cache_blob, dc_job->cache->driver_keys_blob,
                         dc_job->cache->driver_keys_blob_size))
      goto fail;

   /* Write the cache item metadata. This data can be used to deal with
    * hash collisions, as well as providing useful information to 3rd party
    * tools reading the cache files.
    */
   if (!blob_write_uint32(cache_blob, dc_job->cache_item_metadata.type))
      goto fail;

   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
      if (!blob_write_uint32(cache_blob, dc_job->cache_item_metadata.num_keys))
         goto fail;

      size_t metadata_keys_size =
         dc_job->cache_item_metadata.num_keys * sizeof(cache_key);
      if (!blob_write_bytes(cache_blob, dc_job->cache_item_metadata.keys[0],
                            metadata_keys_size))
         goto fail;
   }

   /* Create a CRC of the compressed data. We will read this when restoring
    * the cache and use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(compressed_data, compressed_size);
   cf_data.uncompressed_size = dc_job->size;

   if (!blob_write_bytes(cache_blob, &cf_data, sizeof(cf_data)))
      goto fail;

   /* Finally copy the compressed cache blob */
   if (!blob_write_bytes(cache_blob, compressed_data, compressed_size))
      goto fail;

   free(compressed_data);
   return true;

 fail:
   free(compressed_data);
   return false;
}

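/* Write a cache entry to 'filename'. The data is written to a ".tmp" file
 * while holding a file lock (to avoid racing other writers) and then renamed
 * into place atomically (so readers never see a partially written entry);
 * the shared cache-size total is updated once the rename succeeds.
 */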
void
disk_cache_write_item_to_disk(struct disk_cache_put_job *dc_job,
                              char *filename)
{
   int fd = -1, fd_final = -1;
   struct blob cache_blob;
   blob_init(&cache_blob);

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   char *filename_tmp = NULL;
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that process be responsible
    * for writing the file.
    */
#ifdef HAVE_FLOCK
   int err = flock(fd, LOCK_EX | LOCK_NB);
#else
   struct flock lock = {
      .l_start = 0,
      .l_len = 0, /* entire file */
      .l_type = F_WRLCK,
      .l_whence = SEEK_SET
   };
   int err = fcntl(fd, F_SETLK, &lock);
#endif
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    */
   if (!create_cache_item_header_and_blob(dc_job, &cache_blob)) {
      unlink(filename_tmp);
      goto done;
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename them atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   int ret = write_all(fd, cache_blob.data, cache_blob.size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   struct stat sb;
   if (stat(filename, &sb) == -1) {
      /* Something went wrong; remove the file */
      unlink(filename);
      goto done;
   }

   p_atomic_add(dc_job->cache->size, sb.st_blocks * 512);

 done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   free(filename_tmp);
   blob_finish(&cache_blob);
}

/* Determine the path for the cache, based on the first of the following
 * that is defined:
 *
 *   $MESA_SHADER_CACHE_DIR
 *   $XDG_CACHE_HOME/mesa_shader_cache
 *   <pwd.pw_dir>/.cache/mesa_shader_cache
 */
char *
disk_cache_generate_cache_dir(void *mem_ctx, const char *gpu_name,
                              const char *driver_id)
{
   char *cache_dir_name = CACHE_DIR_NAME;
   if (env_var_as_boolean("MESA_DISK_CACHE_SINGLE_FILE", false))
      cache_dir_name = CACHE_DIR_NAME_SF;

   char *path = getenv("MESA_SHADER_CACHE_DIR");

   if (!path) {
      path = getenv("MESA_GLSL_CACHE_DIR");
      if (path)
         fprintf(stderr,
                 "*** MESA_GLSL_CACHE_DIR is deprecated; "
                 "use MESA_SHADER_CACHE_DIR instead ***\n");
   }

   if (path) {
      if (mkdir_if_needed(path) == -1)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
      if (!path)
         return NULL;
   }

   if (path == NULL) {
      char *xdg_cache_home = getenv("XDG_CACHE_HOME");

      if (xdg_cache_home) {
         if (mkdir_if_needed(xdg_cache_home) == -1)
            return NULL;

         path = concatenate_and_mkdir(mem_ctx, xdg_cache_home, cache_dir_name);
         if (!path)
            return NULL;
      }
   }

   if (!path) {
      char *buf;
      size_t buf_size;
      struct passwd pwd, *result;

      buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
      if (buf_size == -1)
         buf_size = 512;

      /* Loop until buf_size is large enough to query the directory */
      while (1) {
         buf = ralloc_size(mem_ctx, buf_size);

         getpwuid_r(getuid(), &pwd, buf, buf_size, &result);
         if (result)
            break;

         if (errno == ERANGE) {
            ralloc_free(buf);
            buf = NULL;
            buf_size *= 2;
         } else {
            return NULL;
         }
      }

      path = concatenate_and_mkdir(mem_ctx, pwd.pw_dir, ".cache");
      if (!path)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
      if (!path)
         return NULL;
   }

   if (env_var_as_boolean("MESA_DISK_CACHE_SINGLE_FILE", false)) {
      path = concatenate_and_mkdir(mem_ctx, path, driver_id);
      if (!path)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, gpu_name);
      if (!path)
         return NULL;
   }

   return path;
}

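/* Report whether the disk cache should be used at all, taking into account
 * whether the effective and real user IDs differ and the
 * MESA_SHADER_CACHE_DISABLE (or deprecated MESA_GLSL_CACHE_DISABLE)
 * environment variable.
 */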
bool
disk_cache_enabled()
{
   /* If running as a user other than the real user, disable the cache */
   if (geteuid() != getuid())
      return false;

   /* At user request, disable shader cache entirely. */
#ifdef SHADER_CACHE_DISABLE_BY_DEFAULT
   bool disable_by_default = true;
#else
   bool disable_by_default = false;
#endif
   char *envvar_name = "MESA_SHADER_CACHE_DISABLE";
   if (!getenv(envvar_name)) {
      envvar_name = "MESA_GLSL_CACHE_DISABLE";
      if (getenv(envvar_name))
         fprintf(stderr,
                 "*** MESA_GLSL_CACHE_DISABLE is deprecated; "
                 "use MESA_SHADER_CACHE_DISABLE instead ***\n");
   }

   if (env_var_as_boolean(envvar_name, disable_by_default))
      return false;

   return true;
}

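/* Load and validate a cache item from the single-file (Fossilize database)
 * cache backend. Returns the uncompressed data, or NULL on any error.
 */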
void *
disk_cache_load_item_foz(struct disk_cache *cache, const cache_key key,
                         size_t *size)
{
   size_t cache_item_size = 0;
   void *cache_item = foz_read_entry(&cache->foz_db, key, &cache_item_size);
   if (!cache_item)
      return NULL;

   uint8_t *uncompressed_data =
      parse_and_validate_cache_item(cache, cache_item, cache_item_size, size);
   free(cache_item);

   return uncompressed_data;
}

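/* Serialize a cache item and store it in the single-file (Fossilize
 * database) cache backend.
 */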
bool
disk_cache_write_item_to_disk_foz(struct disk_cache_put_job *dc_job)
{
   struct blob cache_blob;
   blob_init(&cache_blob);

   if (!create_cache_item_header_and_blob(dc_job, &cache_blob))
      return false;

   bool r = foz_write_entry(&dc_job->cache->foz_db, dc_job->key,
                            cache_blob.data, cache_blob.size);

   blob_finish(&cache_blob);
   return r;
}

bool
disk_cache_load_cache_index(void *mem_ctx, struct disk_cache *cache)
{
   /* Load the cache index into a hash map (from the Fossilize files) */
   return foz_prepare(&cache->foz_db, cache->path);
}

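/* Map the shared index file for this cache directory into memory. The index
 * stores the total cache size (updated atomically) followed by the table of
 * stored keys; the file is created and sized on first use.
 */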
bool
disk_cache_mmap_cache_index(void *mem_ctx, struct disk_cache *cache,
                            char *path)
{
   int fd = -1;
   bool mapped = false;

   path = ralloc_asprintf(mem_ctx, "%s/index", cache->path);
   if (path == NULL)
      goto path_fail;

   fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
   if (fd == -1)
      goto path_fail;

   struct stat sb;
   if (fstat(fd, &sb) == -1)
      goto path_fail;

   /* Force the index file to be the expected size. */
   size_t size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
   if (sb.st_size != size) {
      if (ftruncate(fd, size) == -1)
         goto path_fail;
   }

   /* We map this shared so that other processes see updates that we
    * make.
    *
    * Note: We do use atomic addition to ensure that multiple
    * processes don't scramble the cache size recorded in the
    * index. But we don't use any locking to prevent multiple
    * processes from updating the same entry simultaneously. The idea
    * is that if either result lands entirely in the index, then
    * that's equivalent to a well-ordered write followed by an
    * eviction and a write. On the other hand, if the simultaneous
    * writes result in a corrupt entry, that's not really any
    * different than both entries being evicted, (since within the
    * guarantees of the cryptographic hash, a corrupt entry is
    * unlikely to ever match a real cache key).
    */
   cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
   if (cache->index_mmap == MAP_FAILED)
      goto path_fail;
   cache->index_mmap_size = size;

   cache->size = (uint64_t *) cache->index_mmap;
   cache->stored_keys = cache->index_mmap + sizeof(uint64_t);
   mapped = true;

 path_fail:
   if (fd != -1)
      close(fd);

   return mapped;
}

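/* Unmap the index mapping created by disk_cache_mmap_cache_index(). */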
void
disk_cache_destroy_mmap(struct disk_cache *cache)
{
   munmap(cache->index_mmap, cache->index_mmap_size);
}
#endif

#endif /* ENABLE_SHADER_CACHE */
