/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "anv_private.h"

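/* Create an anv_shader_bin from a compiled kernel.  Everything except the
 * kernel itself (the shader struct, the hash key, the prog_data, its param
 * array and the binding-table maps) lives in a single device allocation
 * built up with ANV_MULTIALLOC; the kernel code is copied into the
 * instruction state pool.
 */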
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size, const void *prog_data_param_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memcpy(prog_data_param, prog_data_param_in,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   vk_free(&device->alloc, shader);
}

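/* Serialize a shader binary into a blob.  The layout is: key size + key
 * data, kernel size + kernel code, prog_data size + prog_data + its param
 * array, then the bind map counts followed by the surface and sampler
 * binding tables.  anv_shader_bin_create_from_blob() below reads the same
 * layout back.  Blob writes are sticky on failure: once a fixed-size blob
 * runs out of space, every later write also fails, so checking only the
 * result of the last write is sufficient.
 */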
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   bool ok;

   ok = blob_write_uint32(blob, shader->key->size);
   ok = blob_write_bytes(blob, shader->key->data, shader->key->size);

   ok = blob_write_uint32(blob, shader->kernel_size);
   ok = blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   ok = blob_write_uint32(blob, shader->prog_data_size);
   ok = blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   ok = blob_write_bytes(blob, shader->prog_data->param,
                         shader->prog_data->nr_params *
                         sizeof(*shader->prog_data->param));

   ok = blob_write_uint32(blob, shader->bind_map.surface_count);
   ok = blob_write_uint32(blob, shader->bind_map.sampler_count);
   ok = blob_write_uint32(blob, shader->bind_map.image_count);
   ok = blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                         shader->bind_map.surface_count *
                         sizeof(*shader->bind_map.surface_to_descriptor));
   ok = blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                         shader->bind_map.sampler_count *
                         sizeof(*shader->bind_map.sampler_to_descriptor));

   return ok;
}

static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;
   const void *prog_data_param =
      blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param));

   struct anv_pipeline_bind_map bind_map;
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.image_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                      sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                      sizeof(*bind_map.sampler_to_descriptor));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                prog_data, prog_data_size, prog_data_param,
                                &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for
 *   8-bit quantities etc; use bit fields for all bools, e.g. dual_src_blend.
 */

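/* The in-memory cache is a hash table keyed by the opaque
 * anv_shader_bin_key blob (in practice a SHA-1 of the shader and relevant
 * pipeline state, computed by the pipeline code).
 */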
static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

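/* cache->cache == NULL means caching is disabled (see
 * pipeline_cache_enabled() below); every entry point checks for this and
 * falls back to uncached behavior.
 */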
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
   } else {
      cache->cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      struct hash_entry *entry;
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }
}

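/* Look up a key in the cache.  The caller must already hold cache->mutex.
 * The key is rebuilt on the stack with a VLA so that it has the
 * anv_shader_bin_key layout the hash and compare callbacks expect.
 */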
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

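/* Insert a freshly compiled kernel into the cache.  Must be called with
 * cache->mutex held.  If the key is already present the existing bin is
 * returned; otherwise the new bin's initial reference is owned by the
 * cache itself.
 */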
static struct anv_shader_bin *
anv_pipeline_cache_add_shader(struct anv_pipeline_cache *cache,
                              const void *key_data, uint32_t key_size,
                              const void *kernel_data, uint32_t kernel_size,
                              const struct brw_stage_prog_data *prog_data,
                              uint32_t prog_data_size,
                              const void *prog_data_param,
                              const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            prog_data, prog_data_size, prog_data_param,
                            bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader(cache, key_data, key_size,
                                       kernel_data, kernel_size,
                                       prog_data, prog_data_size,
                                       prog_data->param, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size,
                                   prog_data->param, bind_map);
   }
}

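/* Header at the start of the client-visible pipeline cache data.  This
 * matches the layout required by the Vulkan spec: header size, header
 * version, vendor ID, device ID and a pipeline cache UUID.
 */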
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};

static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

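/* The in-memory cache can be disabled for debugging by setting
 * ANV_ENABLE_PIPELINE_CACHE=false in the environment; it defaults to
 * enabled.
 */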
static bool
pipeline_cache_enabled(void)
{
   static int enabled = -1;
   if (enabled < 0)
      enabled = env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);
   return enabled;
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device, pipeline_cache_enabled());

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

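/* Implements the usual Vulkan two-call idiom: when pData is NULL we only
 * report the required size through *pDataSize, otherwise we write as many
 * whole shader binaries as fit and return VK_INCOMPLETE if we had to stop
 * early.
 */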
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->chipset_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      struct hash_entry *entry;
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails, reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

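/* Merging pulls every shader binary from the source caches into the
 * destination; each adopted bin gets an extra reference so that source and
 * destination caches can be destroyed independently.
 */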
VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      struct hash_entry *entry;
      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}