/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"
#include "vulkan/util/vk_util.h"

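/* Allocate an anv_shader_bin and everything it points to (key, prog_data,
 * relocations, transform feedback info, and the bind map tables) in a single
 * multialloc block, copy the inputs into it, upload the kernel into the
 * instruction state pool, and patch the constant-data address relocations.
 */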
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      gl_shader_stage stage,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size,
                      const struct brw_compile_stats *stats, uint32_t num_stats,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   struct brw_shader_reloc *prog_data_relocs;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_relocs, prog_data_in->num_relocs);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                           bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                           bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->vk.alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   shader->stage = stage;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   uint64_t shader_data_addr = INSTRUCTION_STATE_POOL_MIN_ADDRESS +
                               shader->kernel.offset +
                               prog_data_in->const_data_offset;

   struct brw_shader_reloc_value reloc_values[] = {
      {
         .id = ANV_SHADER_RELOC_CONST_DATA_ADDR_LOW,
         .value = shader_data_addr,
      },
      {
         .id = ANV_SHADER_RELOC_CONST_DATA_ADDR_HIGH,
         .value = shader_data_addr >> 32,
      },
   };
   brw_write_shader_relocs(&device->info, shader->kernel.map, prog_data_in,
                           reloc_values, ARRAY_SIZE(reloc_values));

   memcpy(prog_data, prog_data_in, prog_data_size);
   typed_memcpy(prog_data_relocs, prog_data_in->relocs,
                prog_data_in->num_relocs);
   prog_data->relocs = prog_data_relocs;
   memset(prog_data_param, 0,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   assert(num_stats <= ARRAY_SIZE(shader->stats));
   typed_memcpy(shader->stats, stats, num_stats);
   shader->num_stats = num_stats;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   vk_free(&device->vk.alloc, shader);
}

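/* Serialize a shader binary for VkPipelineCache data or the on-disk shader
 * cache.  The layout written here must exactly mirror what
 * anv_shader_bin_create_from_blob() reads back below.
 */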
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->stage);

   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data->relocs,
                    shader->prog_data->num_relocs *
                    sizeof(shader->prog_data->relocs[0]));

   blob_write_uint32(blob, shader->num_stats);
   blob_write_bytes(blob, shader->stats,
                    shader->num_stats * sizeof(shader->stats[0]));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_bytes(blob, shader->bind_map.surface_sha1,
                    sizeof(shader->bind_map.surface_sha1));
   blob_write_bytes(blob, shader->bind_map.sampler_sha1,
                    sizeof(shader->bind_map.sampler_sha1));
   blob_write_bytes(blob, shader->bind_map.push_sha1,
                    sizeof(shader->bind_map.push_sha1));
   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.push_ranges,
                    sizeof(shader->bind_map.push_ranges));

   return !blob->out_of_memory;
}

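/* Deserialize a shader binary written by anv_shader_bin_write_to_blob().
 * The pointers handed to anv_shader_bin_create() point straight into the
 * blob; that is safe because anv_shader_bin_create() copies everything it
 * keeps.  Returns NULL on any overrun of the blob.
 */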
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   gl_shader_stage stage = blob_read_uint32(blob);

   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const void *prog_data_bytes = blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;

   union brw_any_prog_data prog_data;
   memcpy(&prog_data, prog_data_bytes,
          MIN2(sizeof(prog_data), prog_data_size));
   prog_data.base.relocs =
      blob_read_bytes(blob, prog_data.base.num_relocs *
                            sizeof(prog_data.base.relocs[0]));

   uint32_t num_stats = blob_read_uint32(blob);
   const struct brw_compile_stats *stats =
      blob_read_bytes(blob, num_stats * sizeof(stats[0]));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   blob_copy_bytes(blob, bind_map.surface_sha1, sizeof(bind_map.surface_sha1));
   blob_copy_bytes(blob, bind_map.sampler_sha1, sizeof(bind_map.sampler_sha1));
   blob_copy_bytes(blob, bind_map.push_sha1, sizeof(bind_map.push_sha1));
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));
   blob_copy_bytes(blob, bind_map.push_ranges, sizeof(bind_map.push_ranges));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device, stage,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                &prog_data.base, prog_data_size,
                                stats, num_stats, xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for
 *   8-bit quantities etc.; use bit fields for all bools, e.g. dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

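/* Initialize a pipeline cache.  The cache owns two hash tables: one mapping
 * shader keys to anv_shader_bin objects and one mapping SHA-1 hashes to
 * serialized NIR.  When cache_enabled is false both tables stay NULL and
 * every lookup misses.  external_sync skips the internal mutex for caches
 * created with VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT.
 */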
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled,
                        bool external_sync)
{
   vk_object_base_init(&device->vk, &cache->base,
                       VK_OBJECT_TYPE_PIPELINE_CACHE);
   cache->device = device;
   cache->external_sync = external_sync;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }

   vk_object_base_finish(&cache->base);
}

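/* Look up a shader without taking a reference.  The caller must already hold
 * the cache lock (or be externally synchronized).  A temporary key is built
 * on the stack so the lookup can reuse the table's key hashing.
 */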
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

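/* Locking helpers.  For externally synchronized caches the application
 * guarantees single-threaded access, so the mutex can be skipped entirely.
 */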
static inline void
anv_cache_lock(struct anv_pipeline_cache *cache)
{
   if (!cache->external_sync)
      pthread_mutex_lock(&cache->mutex);
}

static inline void
anv_cache_unlock(struct anv_pipeline_cache *cache)
{
   if (!cache->external_sync)
      pthread_mutex_unlock(&cache->mutex);
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   anv_cache_lock(cache);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   anv_cache_unlock(cache);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

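/* Insert an existing shader binary into the cache, e.g. one deserialized
 * from the disk cache.  The cache takes its own reference; if the key is
 * already present the existing entry wins and bin is left unreferenced.
 */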
static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   anv_cache_lock(cache);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   anv_cache_unlock(cache);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     gl_shader_stage stage,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const struct brw_compile_stats *stats,
                                     uint32_t num_stats,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, stage,
                            key_data, key_size,
                            kernel_data, kernel_size,
                            prog_data, prog_data_size,
                            stats, num_stats, xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

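/* Add a freshly compiled kernel to the cache, or return the cached copy if
 * another thread won the compilation race.  The returned binary carries a
 * reference owned by the caller; with caching disabled the caller owns the
 * only reference.
 */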
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 gl_shader_stage stage,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 uint32_t num_stats,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      anv_cache_lock(cache);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, stage, key_data, key_size,
                                              kernel_data, kernel_size,
                                              prog_data, prog_data_size,
                                              stats, num_stats,
                                              xfb_info, bind_map);

      anv_cache_unlock(cache);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, stage,
                                   key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size,
                                   stats, num_stats,
                                   xfb_info, bind_map);
   }
}

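/* Populate the cache from VkPipelineCacheCreateInfo::pInitialData.  The
 * blob is only trusted after its header checks out: plausible header size
 * and version, Intel's PCI vendor ID (0x8086), a matching device ID, and a
 * matching pipeline cache UUID.  Unrecognized or incompatible data is
 * silently ignored.
 */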
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = device->physical;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct vk_pipeline_cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->info.chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);

   cache = vk_alloc2(&device->vk.alloc, pAllocator,
                       sizeof(*cache), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->physical->instance->pipeline_cache_enabled,
                           pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->vk.alloc, pAllocator, cache);
}

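/* Serialize the cache, following the usual Vulkan two-call convention: with
 * pData == NULL we write into a fixed NULL blob of size SIZE_MAX, which
 * stores nothing but still accumulates the size required.  If a shader
 * doesn't fit in the buffer provided, we stop there and return
 * VK_INCOMPLETE with whatever did fit.
 */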
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct vk_pipeline_cache_header header = {
      .header_size = sizeof(struct vk_pipeline_cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->info.chipset_id,
   };
   memcpy(header.uuid, device->physical->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

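/* Search for a compiled kernel, first in the given pipeline cache (if any)
 * and then in the on-disk shader cache.  user_cache_hit reports whether the
 * hit came from an application-provided cache rather than the device's
 * default one.  Disk cache hits are deserialized and promoted into the
 * in-memory cache so subsequent lookups are cheap.
 */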
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_hit)
{
   struct anv_shader_bin *bin;

   *user_cache_hit = false;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin) {
         *user_cache_hit = cache != &device->default_pipeline_cache;
         return bin;
      }
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache && device->physical->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

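/* Upload a compiled kernel to the pipeline cache (or create a standalone
 * binary when there is no cache), then mirror the serialized form to the
 * on-disk shader cache when that's compiled in.
 */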
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         gl_shader_stage stage,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         uint32_t num_stats,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, stage, key_data, key_size,
                                             kernel_data, kernel_size,
                                             prog_data, prog_data_size,
                                             stats, num_stats,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, stage, key_data, key_size,
                                  kernel_data, kernel_size,
                                  prog_data, prog_data_size,
                                  stats, num_stats,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

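/* The NIR cache stores serialized NIR, keyed by a caller-provided 20-byte
 * SHA-1, so repeated compilations of the same source shader can skip the
 * front-end work of producing that NIR.
 */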
struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

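/* Look up serialized NIR by SHA-1 and deserialize it into mem_ctx.  Only
 * the table lookup happens under the lock; NIR cache entries live until the
 * cache is destroyed, so deserialization can proceed unlocked.
 */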
struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      anv_cache_unlock(cache);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

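/* Serialize NIR into the cache if it isn't there already.  The shader is
 * serialized outside the lock; the table is then re-checked under the lock
 * in case another thread inserted the same key in the meantime.
 */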
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      anv_cache_unlock(cache);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir, false);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      anv_cache_lock(cache);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not worth
       * the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         anv_cache_unlock(cache);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      anv_cache_unlock(cache);
   }
}