/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */


#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "util/compress.h"
#include "util/crc32.h"
#include "util/u_debug.h"
#include "util/disk_cache.h"
#include "util/disk_cache_os.h"

#if DETECT_OS_WINDOWS

#include <windows.h>

bool
disk_cache_get_function_identifier(void *ptr, struct mesa_sha1 *ctx)
{
   HMODULE mod = NULL;
   GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
                      (LPCWSTR)ptr,
                      &mod);
   if (!mod)
      return false;

   WCHAR filename[MAX_PATH];
   DWORD filename_length = GetModuleFileNameW(mod, filename, ARRAY_SIZE(filename));

   if (filename_length == 0 || filename_length == ARRAY_SIZE(filename))
      return false;

   HANDLE mod_as_file = CreateFileW(
         filename,
         GENERIC_READ,
         FILE_SHARE_READ,
         NULL,
         OPEN_EXISTING,
         FILE_ATTRIBUTE_NORMAL,
         NULL);
   if (mod_as_file == INVALID_HANDLE_VALUE)
      return false;

   FILETIME time;
   bool ret = GetFileTime(mod_as_file, NULL, NULL, &time);
   if (ret)
      _mesa_sha1_update(ctx, &time, sizeof(time));
   CloseHandle(mod_as_file);
   return ret;
}

#endif

#ifdef ENABLE_SHADER_CACHE

#if DETECT_OS_WINDOWS
/* TODO: implement disk cache support on windows */

#else

#include <dirent.h>
#include <errno.h>
#include <pwd.h>
#include <stdio.h>
#include <string.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <utime.h>

#include "util/blob.h"
#include "util/crc32.h"
#include "util/u_debug.h"
#include "util/ralloc.h"
#include "util/rand_xor.h"

/* Create a directory named 'path' if it does not already exist.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *          -1 in all other cases.
 */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* If the path exists already, then our work is done if it's a
    * directory, but it's an error if it is not.
    */
   if (stat(path, &sb) == 0) {
      if (S_ISDIR(sb.st_mode)) {
         return 0;
      } else {
         fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
                 "---disabling.\n", path);
         return -1;
      }
   }

   int ret = mkdir(path, 0700);
   if (ret == 0 || (ret == -1 && errno == EEXIST))
      return 0;

   fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
           path, strerror(errno));

   return -1;
}

/* Create a directory named 'path' if it does not already exist,
 * including parent directories if required.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *          -1 in all other cases.
 */
static int
mkdir_with_parents_if_needed(const char *path)
{
   char *p;
   const char *end;

   if (path[0] == '\0')
      return -1;

   p = strdup(path);
   end = p + strlen(p) + 1; /* end points to the \0 terminator */
   for (char *q = p; q != end; q++) {
      if (*q == '/' || q == end - 1) {
         if (q == p) {
            /* Skip the first / of an absolute path. */
            continue;
         }

         *q = '\0';

         if (mkdir_if_needed(p) == -1) {
            free(p);
            return -1;
         }

         *q = '/';
      }
   }
   free(p);

   return 0;
}
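
/* Illustrative note (not part of the original source): the function above
 * temporarily terminates the copied string at each '/' and calls
 * mkdir_if_needed() on each growing prefix. For a hypothetical path it
 * behaves as if the caller had written:
 *
 *    mkdir_with_parents_if_needed("/home/user/.cache/mesa_shader_cache");
 *    // roughly equivalent to:
 *    //    mkdir_if_needed("/home");
 *    //    mkdir_if_needed("/home/user");
 *    //    mkdir_if_needed("/home/user/.cache");
 *    //    mkdir_if_needed("/home/user/.cache/mesa_shader_cache");
 */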

/* Concatenate an existing path and a new name to form a new path. If the new
 * path does not exist as a directory, create it then return the resulting
 * name of the new path (ralloc'ed off of 'ctx').
 *
 * Returns NULL on any error, such as:
 *
 *      <path> does not exist or is not a directory
 *      <path>/<name> exists but is not a directory
 *      <path>/<name> cannot be created as a directory
 */
static char *
concatenate_and_mkdir(void *ctx, const char *path, const char *name)
{
   char *new_path;
   struct stat sb;

   if (stat(path, &sb) != 0 || !S_ISDIR(sb.st_mode))
      return NULL;

   new_path = ralloc_asprintf(ctx, "%s/%s", path, name);

   if (mkdir_if_needed(new_path) == 0)
      return new_path;
   else
      return NULL;
}

struct lru_file {
   struct list_head node;
   char *lru_name;
   size_t lru_file_size;
   time_t lru_atime;
};

static void
free_lru_file_list(struct list_head *lru_file_list)
{
   struct lru_file *e, *next;
   LIST_FOR_EACH_ENTRY_SAFE(e, next, lru_file_list, node) {
      free(e->lru_name);
      free(e);
   }
   free(lru_file_list);
}

/* Given a directory path and predicate function, create a linked list of
 * entries with the oldest access time in that directory for which the
 * predicate returns true.
 *
 * Returns: A malloc'ed linked list of the paths of the chosen files, (or
 * NULL on any error). The caller should free the linked list via
 * free_lru_file_list() when finished.
 */
static struct list_head *
choose_lru_file_matching(const char *dir_path,
                         bool (*predicate)(const char *dir_path,
                                           const struct stat *,
                                           const char *, const size_t))
{
   DIR *dir;
   struct dirent *dir_ent;

   dir = opendir(dir_path);
   if (dir == NULL)
      return NULL;

   const int dir_fd = dirfd(dir);

   /* First count the number of files in the directory */
   unsigned total_file_count = 0;
   while ((dir_ent = readdir(dir)) != NULL) {
#ifdef HAVE_DIRENT_D_TYPE
      if (dir_ent->d_type == DT_REG) { /* If the entry is a regular file */
         total_file_count++;
      }
#else
      struct stat st;

      if (fstatat(dir_fd, dir_ent->d_name, &st, AT_SYMLINK_NOFOLLOW) == 0) {
         if (S_ISREG(st.st_mode)) {
            total_file_count++;
         }
      }
#endif
   }

   /* Reset to the start of the directory */
   rewinddir(dir);

   /* Collect 10% of the files in this directory for removal. Note: This
    * should work out to only be around 0.04% of total cache items.
    */
   unsigned lru_file_count = total_file_count > 10 ? total_file_count / 10 : 1;
   struct list_head *lru_file_list = malloc(sizeof(struct list_head));
   list_inithead(lru_file_list);

   unsigned processed_files = 0;
   while (1) {
      dir_ent = readdir(dir);
      if (dir_ent == NULL)
         break;

      struct stat sb;
      if (fstatat(dir_fd, dir_ent->d_name, &sb, 0) == 0) {
         struct lru_file *entry = NULL;
         if (!list_is_empty(lru_file_list))
            entry = list_first_entry(lru_file_list, struct lru_file, node);

         if (!entry || sb.st_atime < entry->lru_atime) {
            size_t len = strlen(dir_ent->d_name);
            if (!predicate(dir_path, &sb, dir_ent->d_name, len))
               continue;

            bool new_entry = false;
            if (processed_files < lru_file_count) {
               entry = calloc(1, sizeof(struct lru_file));
               new_entry = true;
            }
            processed_files++;

            char *tmp = realloc(entry->lru_name, len + 1);
            if (tmp) {
               /* Find the location to insert the new lru item. We want to
                * keep the list ordered from most recently used to least
                * recently used. This allows us to just evict the head item
                * from the list as we process the directory and find older
                * entries.
                */
               struct list_head *list_node = lru_file_list;
               struct lru_file *e;
               LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
                  if (sb.st_atime < entry->lru_atime) {
                     list_node = &e->node;
                     break;
                  }
               }

               if (new_entry) {
                  list_addtail(&entry->node, list_node);
               } else {
                  if (list_node != lru_file_list) {
                     list_del(lru_file_list);
                     list_addtail(lru_file_list, list_node);
                  }
               }

               entry->lru_name = tmp;
               memcpy(entry->lru_name, dir_ent->d_name, len + 1);
               entry->lru_atime = sb.st_atime;
               entry->lru_file_size = sb.st_blocks * 512;
            }
         }
      }
   }

   if (list_is_empty(lru_file_list)) {
      closedir(dir);
      free(lru_file_list);
      return NULL;
   }

   /* Create the full path for the file list we found */
   struct lru_file *e;
   LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
      char *filename = e->lru_name;
      if (asprintf(&e->lru_name, "%s/%s", dir_path, filename) < 0)
         e->lru_name = NULL;

      free(filename);
   }

   closedir(dir);

   return lru_file_list;
}

/* Is entry a regular file, and not one with a name ending in
 * ".tmp"?
 */
static bool
is_regular_non_tmp_file(const char *path, const struct stat *sb,
                        const char *d_name, const size_t len)
{
   if (!S_ISREG(sb->st_mode))
      return false;

   if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0)
      return false;

   return true;
}

/* Returns the total size of the deleted files, (or 0 on any error). */
static size_t
unlink_lru_file_from_directory(const char *path)
{
   struct list_head *lru_file_list =
      choose_lru_file_matching(path, is_regular_non_tmp_file);
   if (lru_file_list == NULL)
      return 0;

   assert(!list_is_empty(lru_file_list));

   size_t total_unlinked_size = 0;
   struct lru_file *e;
   LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
      if (unlink(e->lru_name) == 0)
         total_unlinked_size += e->lru_file_size;
   }
   free_lru_file_list(lru_file_list);

   return total_unlinked_size;
}

/* Is entry a directory with a two-character name, (and not the
 * special name of ".."). We also return false if the dir is empty.
 */
static bool
is_two_character_sub_directory(const char *path, const struct stat *sb,
                               const char *d_name, const size_t len)
{
   if (!S_ISDIR(sb->st_mode))
      return false;

   if (len != 2)
      return false;

   if (strcmp(d_name, "..") == 0)
      return false;

   char *subdir;
   if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
      return false;
   DIR *dir = opendir(subdir);
   free(subdir);

   if (dir == NULL)
      return false;

   unsigned subdir_entries = 0;
   struct dirent *d;
   while ((d = readdir(dir)) != NULL) {
      if (++subdir_entries > 2)
         break;
   }
   closedir(dir);

   /* If dir only contains '.' and '..' it must be empty */
   if (subdir_entries <= 2)
      return false;

   return true;
}

/* Create the directory that will be needed for the cache file for \key.
 *
 * Obviously, the implementation here must closely match
 * disk_cache_get_cache_filename() below.
 */
static void
make_cache_file_directory(struct disk_cache *cache, const cache_key key)
{
   char *dir;
   char buf[41];

   _mesa_sha1_format(buf, key);
   if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
      return;

   mkdir_if_needed(dir);
   free(dir);
}

static ssize_t
read_all(int fd, void *buf, size_t count)
{
   char *in = buf;
   ssize_t read_ret;
   size_t done;

   for (done = 0; done < count; done += read_ret) {
      read_ret = read(fd, in + done, count - done);
      if (read_ret == -1 || read_ret == 0)
         return -1;
   }
   return done;
}

static ssize_t
write_all(int fd, const void *buf, size_t count)
{
   const char *out = buf;
   ssize_t written;
   size_t done;

   for (done = 0; done < count; done += written) {
      written = write(fd, out + done, count - done);
      if (written == -1)
         return -1;
   }
   return done;
}

/* Evict least recently used cache item */
void
disk_cache_evict_lru_item(struct disk_cache *cache)
{
   char *dir_path;

   /* With a reasonably-sized, full cache, (and with keys generated
    * from a cryptographic hash), we can choose two random hex digits
    * and reasonably expect the directory to exist with a file in it.
    * Provides pseudo-LRU eviction to reduce checking all cache files.
    */
   uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
   if (asprintf(&dir_path, "%s/%02" PRIx64, cache->path, rand64 & 0xff) < 0)
      return;

   size_t size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size) {
      p_atomic_add(&cache->size->value, - (uint64_t)size);
      return;
   }

   /* In the case where the random choice of directory didn't find
    * something, we choose the least recently accessed from the
    * existing directories.
    *
    * Really, the only reason this code exists is to allow the unit
    * tests to work, (which use an artificially-small cache to be able
    * to force a single cached item to be evicted).
    */
   struct list_head *lru_file_list =
      choose_lru_file_matching(cache->path, is_two_character_sub_directory);
   if (lru_file_list == NULL)
      return;

   assert(!list_is_empty(lru_file_list));

   struct lru_file *lru_file_dir =
      list_first_entry(lru_file_list, struct lru_file, node);

   size = unlink_lru_file_from_directory(lru_file_dir->lru_name);

   free_lru_file_list(lru_file_list);

   if (size)
      p_atomic_add(&cache->size->value, - (uint64_t)size);
}

void
disk_cache_evict_item(struct disk_cache *cache, char *filename)
{
   struct stat sb;
   if (stat(filename, &sb) == -1) {
      free(filename);
      return;
   }

   unlink(filename);
   free(filename);

   if (sb.st_blocks)
      p_atomic_add(&cache->size->value, - (uint64_t)sb.st_blocks * 512);
}
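
/* Sketch of the cache item layout parsed below and produced by
 * create_cache_item_header_and_blob(). This diagram is an editorial addition
 * derived from the code; it is not a formal on-disk specification:
 *
 *    [ driver_keys_blob        : cache->driver_keys_blob_size bytes ]
 *    [ metadata type           : uint32_t                           ]
 *    [ if CACHE_ITEM_TYPE_GLSL:                                     ]
 *    [    num_keys             : uint32_t                           ]
 *    [    keys                 : num_keys * sizeof(cache_key) bytes ]
 *    [ struct cache_entry_file_data                                 ]
 *    [    crc32 of the (compressed) payload                         ]
 *    [    uncompressed_size                                         ]
 *    [ payload                 : compressed unless                  ]
 *    [                           cache->compression_disabled        ]
 */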

static void *
parse_and_validate_cache_item(struct disk_cache *cache, void *cache_item,
                              size_t cache_item_size, size_t *size)
{
   uint8_t *uncompressed_data = NULL;

   struct blob_reader ci_blob_reader;
   blob_reader_init(&ci_blob_reader, cache_item, cache_item_size);

   size_t header_size = cache->driver_keys_blob_size;
   const void *keys_blob = blob_read_bytes(&ci_blob_reader, header_size);
   if (ci_blob_reader.overrun)
      goto fail;

   /* Check for extremely unlikely hash collisions */
   if (memcmp(cache->driver_keys_blob, keys_blob, header_size) != 0) {
      assert(!"Mesa cache keys mismatch!");
      goto fail;
   }

   uint32_t md_type = blob_read_uint32(&ci_blob_reader);
   if (ci_blob_reader.overrun)
      goto fail;

   if (md_type == CACHE_ITEM_TYPE_GLSL) {
      uint32_t num_keys = blob_read_uint32(&ci_blob_reader);
      if (ci_blob_reader.overrun)
         goto fail;

      /* The cache item metadata is currently just used for distributing
       * precompiled shaders; it is not used by Mesa itself, so just skip it
       * for now.
       * TODO: pass the metadata back to the caller and do some basic
       * validation.
       */
      const void UNUSED *metadata =
         blob_read_bytes(&ci_blob_reader, num_keys * sizeof(cache_key));
      if (ci_blob_reader.overrun)
         goto fail;
   }

   /* Load the CRC that was created when the file was written. */
   struct cache_entry_file_data *cf_data =
      (struct cache_entry_file_data *)
      blob_read_bytes(&ci_blob_reader, sizeof(struct cache_entry_file_data));
   if (ci_blob_reader.overrun)
      goto fail;

   size_t cache_data_size = ci_blob_reader.end - ci_blob_reader.current;
   const uint8_t *data = (uint8_t *) blob_read_bytes(&ci_blob_reader, cache_data_size);

   /* Check the data for corruption */
   if (cf_data->crc32 != util_hash_crc32(data, cache_data_size))
      goto fail;

   /* Uncompress the cache data */
   uncompressed_data = malloc(cf_data->uncompressed_size);
   if (!uncompressed_data)
      goto fail;

   if (cache->compression_disabled) {
      if (cf_data->uncompressed_size != cache_data_size)
         goto fail;

      memcpy(uncompressed_data, data, cache_data_size);
   } else {
      if (!util_compress_inflate(data, cache_data_size, uncompressed_data,
                                 cf_data->uncompressed_size))
         goto fail;
   }

   if (size)
      *size = cf_data->uncompressed_size;

   return uncompressed_data;

fail:
   if (uncompressed_data)
      free(uncompressed_data);

   return NULL;
}

void *
disk_cache_load_item(struct disk_cache *cache, char *filename, size_t *size)
{
   uint8_t *data = NULL;

   int fd = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      goto fail;

   struct stat sb;
   if (fstat(fd, &sb) == -1)
      goto fail;

   data = malloc(sb.st_size);
   if (data == NULL)
      goto fail;

   /* Read entire file into memory */
   int ret = read_all(fd, data, sb.st_size);
   if (ret == -1)
      goto fail;

   uint8_t *uncompressed_data =
      parse_and_validate_cache_item(cache, data, sb.st_size, size);
   if (!uncompressed_data)
      goto fail;

   free(data);
   free(filename);
   close(fd);

   return uncompressed_data;

fail:
   if (data)
      free(data);
   if (filename)
      free(filename);
   if (fd != -1)
      close(fd);

   return NULL;
}

/* Return a filename within the cache's directory corresponding to 'key'.
 *
 * Returns NULL if out of memory.
 */
char *
disk_cache_get_cache_filename(struct disk_cache *cache, const cache_key key)
{
   char buf[41];
   char *filename;

   if (cache->path_init_failed)
      return NULL;

   _mesa_sha1_format(buf, key);
   if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
                buf[1], buf + 2) == -1)
      return NULL;

   return filename;
}
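
/* For illustration (an editorial example, values are hypothetical): with the
 * cache rooted at "/home/user/.cache/mesa_shader_cache" and a key whose hex
 * form is "d41d8cd98f00b204e9800998ecf8427e12345678", the function above
 * returns
 *
 *    /home/user/.cache/mesa_shader_cache/d4/1d8cd98f00b204e9800998ecf8427e12345678
 *
 * i.e. the first two hex digits pick the sub-directory created by
 * make_cache_file_directory() and the remaining digits name the file.
 */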

static bool
create_cache_item_header_and_blob(struct disk_cache_put_job *dc_job,
                                  struct blob *cache_blob)
{

   /* Compress the cache item data */
   size_t max_buf = util_compress_max_compressed_len(dc_job->size);
   size_t compressed_size;
   void *compressed_data;

   if (dc_job->cache->compression_disabled) {
      compressed_size = dc_job->size;
      compressed_data = dc_job->data;
   } else {
      compressed_data = malloc(max_buf);
      if (compressed_data == NULL)
         return false;
      compressed_size =
         util_compress_deflate(dc_job->data, dc_job->size,
                               compressed_data, max_buf);
      if (compressed_size == 0)
         goto fail;
   }

   /* Copy the driver_keys_blob; this can be used to find information about
    * the Mesa version that produced the entry or to deal with hash
    * collisions, should that ever become a real problem.
    */
   if (!blob_write_bytes(cache_blob, dc_job->cache->driver_keys_blob,
                         dc_job->cache->driver_keys_blob_size))
      goto fail;

   /* Write the cache item metadata. This data can be used to deal with
    * hash collisions, as well as providing useful information to 3rd party
    * tools reading the cache files.
    */
   if (!blob_write_uint32(cache_blob, dc_job->cache_item_metadata.type))
      goto fail;

   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
      if (!blob_write_uint32(cache_blob, dc_job->cache_item_metadata.num_keys))
         goto fail;

      size_t metadata_keys_size =
         dc_job->cache_item_metadata.num_keys * sizeof(cache_key);
      if (!blob_write_bytes(cache_blob, dc_job->cache_item_metadata.keys[0],
                            metadata_keys_size))
         goto fail;
   }

   /* Create a CRC of the compressed data. We will read this when restoring
    * the cache and use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(compressed_data, compressed_size);
   cf_data.uncompressed_size = dc_job->size;

   if (!blob_write_bytes(cache_blob, &cf_data, sizeof(cf_data)))
      goto fail;

   /* Finally copy the compressed cache blob */
   if (!blob_write_bytes(cache_blob, compressed_data, compressed_size))
      goto fail;

   if (!dc_job->cache->compression_disabled)
      free(compressed_data);

   return true;

fail:
   if (!dc_job->cache->compression_disabled)
      free(compressed_data);

   return false;
}

void
disk_cache_write_item_to_disk(struct disk_cache_put_job *dc_job,
                              char *filename)
{
   int fd = -1, fd_final = -1;
   struct blob cache_blob;
   blob_init(&cache_blob);

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   char *filename_tmp = NULL;
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that process be responsible
    * for writing the file.
    */
#ifdef HAVE_FLOCK
   int err = flock(fd, LOCK_EX | LOCK_NB);
#else
   struct flock lock = {
      .l_start = 0,
      .l_len = 0, /* entire file */
      .l_type = F_WRLCK,
      .l_whence = SEEK_SET
   };
   int err = fcntl(fd, F_SETLK, &lock);
#endif
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    */
   if (!create_cache_item_header_and_blob(dc_job, &cache_blob)) {
      unlink(filename_tmp);
      goto done;
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename them atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   int ret = write_all(fd, cache_blob.data, cache_blob.size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   struct stat sb;
   if (stat(filename, &sb) == -1) {
      /* Something went wrong; remove the file. */
      unlink(filename);
      goto done;
   }

   p_atomic_add(&dc_job->cache->size->value, sb.st_blocks * 512);

done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   free(filename_tmp);
   blob_finish(&cache_blob);
}
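
/* Summary of the write protocol above (an editorial note derived from the
 * code, not a separate specification):
 *
 *   1. Create "<filename>.tmp", creating the two-character sub-directory on
 *      demand if the first open() fails with ENOENT.
 *   2. Take a non-blocking exclusive lock (flock() or fcntl(F_SETLK)); if it
 *      is already held, another process is writing this entry, so give up.
 *   3. Re-check whether the final file already exists; if it does, the race
 *      was lost and the temporary file is discarded.
 *   4. Serialize the item, write_all() it, rename() it into place, and add
 *      its on-disk size (st_blocks * 512) to the shared cache size.
 */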

/* Determine path for cache based on the first defined name as follows:
 *
 *   $MESA_SHADER_CACHE_DIR
 *   $XDG_CACHE_HOME/mesa_shader_cache
 *   <pwd.pw_dir>/.cache/mesa_shader_cache
 */
char *
disk_cache_generate_cache_dir(void *mem_ctx, const char *gpu_name,
                              const char *driver_id,
                              enum disk_cache_type cache_type)
{
   char *cache_dir_name = CACHE_DIR_NAME;
   if (cache_type == DISK_CACHE_SINGLE_FILE)
      cache_dir_name = CACHE_DIR_NAME_SF;
   else if (cache_type == DISK_CACHE_DATABASE)
      cache_dir_name = CACHE_DIR_NAME_DB;

   char *path = secure_getenv("MESA_SHADER_CACHE_DIR");

   if (!path) {
      path = secure_getenv("MESA_GLSL_CACHE_DIR");
      if (path)
         fprintf(stderr,
                 "*** MESA_GLSL_CACHE_DIR is deprecated; "
                 "use MESA_SHADER_CACHE_DIR instead ***\n");
   }

   if (path) {
      if (mkdir_with_parents_if_needed(path) == -1)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
      if (!path)
         return NULL;
   }

   if (path == NULL) {
      char *xdg_cache_home = secure_getenv("XDG_CACHE_HOME");

      if (xdg_cache_home) {
         if (mkdir_if_needed(xdg_cache_home) == -1)
            return NULL;

         path = concatenate_and_mkdir(mem_ctx, xdg_cache_home, cache_dir_name);
         if (!path)
            return NULL;
      }
   }

   if (!path) {
      char *buf;
      size_t buf_size;
      struct passwd pwd, *result;

      buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
      if (buf_size == -1)
         buf_size = 512;

      /* Loop until buf_size is large enough to query the directory */
      while (1) {
         buf = ralloc_size(mem_ctx, buf_size);

         getpwuid_r(getuid(), &pwd, buf, buf_size, &result);
         if (result)
            break;

         if (errno == ERANGE) {
            ralloc_free(buf);
            buf = NULL;
            buf_size *= 2;
         } else {
            return NULL;
         }
      }

      path = concatenate_and_mkdir(mem_ctx, pwd.pw_dir, ".cache");
      if (!path)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
      if (!path)
         return NULL;
   }

   if (cache_type == DISK_CACHE_SINGLE_FILE) {
      path = concatenate_and_mkdir(mem_ctx, path, driver_id);
      if (!path)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, gpu_name);
      if (!path)
         return NULL;
   }

   return path;
}
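
/* Examples (editorial, illustrative only; CACHE_DIR_NAME, CACHE_DIR_NAME_SF
 * and CACHE_DIR_NAME_DB expand to build-time directory names, shown
 * symbolically here):
 *
 *    MESA_SHADER_CACHE_DIR=/var/tmp/cache  ->  /var/tmp/cache/<CACHE_DIR_NAME>
 *    XDG_CACHE_HOME=/home/user/.cache      ->  /home/user/.cache/<CACHE_DIR_NAME>
 *    neither set, pw_dir=/home/user        ->  /home/user/.cache/<CACHE_DIR_NAME>
 *
 * For DISK_CACHE_SINGLE_FILE, <driver_id> and <gpu_name> are appended as two
 * further sub-directories.
 */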

bool
disk_cache_enabled()
{
   /* The disk cache is not enabled on Android, but Android's EGL layer
    * uses EGL_ANDROID_blob_cache to manage the cache itself.
    */
   if (DETECT_OS_ANDROID)
      return false;

   /* If running as a user other than the real user, disable the cache. */
   if (!__normal_user())
      return false;

   /* At user request, disable the shader cache entirely. */
#ifdef SHADER_CACHE_DISABLE_BY_DEFAULT
   bool disable_by_default = true;
#else
   bool disable_by_default = false;
#endif
   char *envvar_name = "MESA_SHADER_CACHE_DISABLE";
   if (!getenv(envvar_name)) {
      envvar_name = "MESA_GLSL_CACHE_DISABLE";
      if (getenv(envvar_name))
         fprintf(stderr,
                 "*** MESA_GLSL_CACHE_DISABLE is deprecated; "
                 "use MESA_SHADER_CACHE_DISABLE instead ***\n");
   }

   if (debug_get_bool_option(envvar_name, disable_by_default))
      return false;

   return true;
}
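
/* Usage note (editorial): the checks above mean the cache can be toggled from
 * the environment, e.g.
 *
 *    MESA_SHADER_CACHE_DISABLE=true ./app    # force the cache off
 *    MESA_SHADER_CACHE_DISABLE=false ./app   # force it on for builds that
 *                                            # define SHADER_CACHE_DISABLE_BY_DEFAULT
 *
 * debug_get_bool_option() falls back to disable_by_default when the variable
 * is unset.
 */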

void *
disk_cache_load_item_foz(struct disk_cache *cache, const cache_key key,
                         size_t *size)
{
   size_t cache_item_size = 0;
   void *cache_item = foz_read_entry(&cache->foz_db, key, &cache_item_size);
   if (!cache_item)
      return NULL;

   uint8_t *uncompressed_data =
      parse_and_validate_cache_item(cache, cache_item, cache_item_size, size);
   free(cache_item);

   return uncompressed_data;
}

bool
disk_cache_write_item_to_disk_foz(struct disk_cache_put_job *dc_job)
{
   struct blob cache_blob;
   blob_init(&cache_blob);

   if (!create_cache_item_header_and_blob(dc_job, &cache_blob))
      return false;

   bool r = foz_write_entry(&dc_job->cache->foz_db, dc_job->key,
                            cache_blob.data, cache_blob.size);

   blob_finish(&cache_blob);
   return r;
}

bool
disk_cache_load_cache_index_foz(void *mem_ctx, struct disk_cache *cache)
{
   /* Load the cache index into a hash map (from fossilize files) */
   return foz_prepare(&cache->foz_db, cache->path);
}


void
disk_cache_touch_cache_user_marker(char *path)
{
   char *marker_path = NULL;
   asprintf(&marker_path, "%s/marker", path);
   if (!marker_path)
      return;

   time_t now = time(NULL);

   struct stat attr;
   if (stat(marker_path, &attr) == -1) {
      int fd = open(marker_path, O_WRONLY | O_CREAT | O_CLOEXEC, 0644);
      if (fd != -1) {
         close(fd);
      }
   } else if (now - attr.st_mtime > 60 * 60 * 24 /* One day */) {
      (void)utime(marker_path, NULL);
   }
   free(marker_path);
}

bool
disk_cache_mmap_cache_index(void *mem_ctx, struct disk_cache *cache,
                            char *path)
{
   int fd = -1;
   bool mapped = false;

   path = ralloc_asprintf(mem_ctx, "%s/index", cache->path);
   if (path == NULL)
      goto path_fail;

   fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
   if (fd == -1)
      goto path_fail;

   struct stat sb;
   if (fstat(fd, &sb) == -1)
      goto path_fail;

   /* Force the index file to be the expected size. */
   size_t size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
   if (sb.st_size != size) {
#if HAVE_POSIX_FALLOCATE
      /* posix_fallocate() ensures disk space is allocated otherwise it
       * fails if there is not enough space on the disk.
       */
      if (posix_fallocate(fd, 0, size) != 0)
         goto path_fail;
#else
      /* ftruncate() allocates disk space lazily. If the disk is full
       * and it is unable to allocate disk space when accessed via
       * mmap, it will crash with a SIGBUS.
       */
      if (ftruncate(fd, size) == -1)
         goto path_fail;
#endif
   }

   /* We map this shared so that other processes see updates that we
    * make.
    *
    * Note: We do use atomic addition to ensure that multiple
    * processes don't scramble the cache size recorded in the
    * index. But we don't use any locking to prevent multiple
    * processes from updating the same entry simultaneously. The idea
    * is that if either result lands entirely in the index, then
    * that's equivalent to a well-ordered write followed by an
    * eviction and a write. On the other hand, if the simultaneous
    * writes result in a corrupt entry, that's not really any
    * different than both entries being evicted, (since within the
    * guarantees of the cryptographic hash, a corrupt entry is
    * unlikely to ever match a real cache key).
    */
   cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
   if (cache->index_mmap == MAP_FAILED)
      goto path_fail;
   cache->index_mmap_size = size;

   cache->size = (p_atomic_uint64_t *) cache->index_mmap;
   cache->stored_keys = cache->index_mmap + sizeof(uint64_t);
   mapped = true;

path_fail:
   if (fd != -1)
      close(fd);

   return mapped;
}
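
/* Layout of the mmap'ed index file, as established above (an editorial sketch
 * derived from the code):
 *
 *    offset 0                : 64-bit total cache size (cache->size),
 *                              updated with p_atomic_add()
 *    offset sizeof(uint64_t) : CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE bytes
 *                              of stored keys (cache->stored_keys)
 */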

void
disk_cache_destroy_mmap(struct disk_cache *cache)
{
   munmap(cache->index_mmap, cache->index_mmap_size);
}

void *
disk_cache_db_load_item(struct disk_cache *cache, const cache_key key,
                        size_t *size)
{
   size_t cache_item_size = 0;
   void *cache_item = mesa_cache_db_multipart_read_entry(&cache->cache_db,
                                                         key, &cache_item_size);
   if (!cache_item)
      return NULL;

   uint8_t *uncompressed_data =
      parse_and_validate_cache_item(cache, cache_item, cache_item_size, size);
   free(cache_item);

   return uncompressed_data;
}

bool
disk_cache_db_write_item_to_disk(struct disk_cache_put_job *dc_job)
{
   struct blob cache_blob;
   blob_init(&cache_blob);

   if (!create_cache_item_header_and_blob(dc_job, &cache_blob))
      return false;

   bool r = mesa_cache_db_multipart_entry_write(&dc_job->cache->cache_db,
                                                dc_job->key, cache_blob.data,
                                                cache_blob.size);

   blob_finish(&cache_blob);
   return r;
}

bool
disk_cache_db_load_cache_index(void *mem_ctx, struct disk_cache *cache)
{
   return mesa_cache_db_multipart_open(&cache->cache_db, cache->path);
}
#endif

#endif /* ENABLE_SHADER_CACHE */