/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>
#include <drm_fourcc.h>

#include "anv_private.h"
#include "util/strtod.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/mesa-sha1.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

static void
compiler_debug_log(void *data, const char *fmt, ...)
{ }

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      intel_logd_v(fmt, args);

   va_end(args);
}

static VkResult
anv_compute_heap_size(int fd, uint64_t *heap_size)
{
   uint64_t gtt_size;
   if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
                                 &gtt_size) == -1) {
      /* If, for whatever reason, we can't actually get the GTT size from the
       * kernel (too old?) fall back to the aperture size.
       */
      anv_perf_warn(NULL, NULL,
                    "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");

      if (anv_gem_get_aperture(fd, &gtt_size) == -1) {
         return vk_errorf(NULL, NULL, VK_ERROR_INITIALIZATION_FAILED,
                          "failed to get aperture size: %m");
      }
   }

   /* Query the total ram from the system */
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU.  If the user has 4GiB
    * or less, we use at most half.  If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   /* We also want to leave some padding for things we allocate in the driver,
    * so don't go over 3/4 of the GTT either.
    */
   uint64_t available_gtt = gtt_size * 3 / 4;

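   /* Worked example (hypothetical numbers): with 16 GiB of system RAM and a
    * 4 GiB GTT, available_ram is 12 GiB and available_gtt is 3 GiB, so the
    * heap size returned below ends up being 3 GiB.
    */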
   *heap_size = MIN2(available_ram, available_gtt);

   return VK_SUCCESS;
}

static VkResult
anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
{
   /* The kernel query only tells us whether or not the kernel supports the
    * EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and not whether or not the
    * hardware has actual 48bit address support.
    */
   device->supports_48bit_addresses =
      (device->info.gen >= 8) && anv_gem_supports_48b_addresses(fd);

   uint64_t heap_size;
   VkResult result = anv_compute_heap_size(fd, &heap_size);
   if (result != VK_SUCCESS)
      return result;

   if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
      /* When running with an overridden PCI ID, we may get a GTT size from
       * the kernel that is greater than 2 GiB but the execbuf check for 48bit
       * address support can still fail.  Just clamp the address space size to
       * 2 GiB if we don't have 48-bit support.
       */
      intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
                        "no support for 48-bit addresses",
                        __FILE__, __LINE__);
      heap_size = 2ull << 30;
   }

   if (heap_size <= 3ull * (1ull << 30)) {
      /* In this case, everything fits nicely into the 32-bit address space,
       * so there's no need for supporting 48bit addresses on client-allocated
       * memory objects.
       */
      device->memory.heap_count = 1;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .size = heap_size,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = false,
      };
   } else {
      /* Not everything will fit nicely into a 32-bit address space.  In this
       * case we need a 64-bit heap.  Advertise a small 32-bit heap and a
       * larger 48-bit heap.  If we're in this case, then we have a total heap
       * size larger than 3GiB which most likely means they have 8 GiB of
       * video memory and so carving off 1 GiB for the 32-bit heap should be
       * reasonable.
       */
      const uint64_t heap_size_32bit = 1ull << 30;
      const uint64_t heap_size_48bit = heap_size - heap_size_32bit;
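      /* For instance (hypothetical numbers), a 12 GiB heap_size would be
       * split into an 11 GiB 48-bit heap and a 1 GiB 32-bit heap.
       */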

      assert(device->supports_48bit_addresses);

      device->memory.heap_count = 2;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .size = heap_size_48bit,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = true,
      };
      device->memory.heaps[1] = (struct anv_memory_heap) {
         .size = heap_size_32bit,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = false,
      };
   }

   uint32_t type_count = 0;
   for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
      uint32_t valid_buffer_usage = ~0;

      /* There appears to be a hardware issue in the VF cache where it only
       * considers the bottom 32 bits of memory addresses.  If you happen to
       * have two vertex buffers which get placed exactly 4 GiB apart and use
       * them in back-to-back draw calls, you can get collisions.  In order to
       * solve this problem, we require vertex and index buffers be bound to
       * memory allocated out of the 32-bit heap.
       */
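      /* As an illustration (hypothetical addresses): buffers placed at
       * 0x100001000 and 0x200001000 sit exactly 4 GiB apart and share the
       * same low 32 bits, so the VF cache would treat them as aliasing.
       */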
      if (device->memory.heaps[heap].supports_48bit_addresses) {
         valid_buffer_usage &= ~(VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
                                 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
      }

      if (device->info.has_llc) {
         /* Big core GPUs share LLC with the CPU and thus one memory type can be
          * both cached and coherent at the same time.
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
      } else {
         /* The spec requires that we expose a host-visible, coherent memory
          * type, but Atom GPUs don't share LLC.  Thus we offer two memory
          * types: one that is cached but not coherent, and one that is
          * coherent but uncached (write-combined).
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
      }
   }
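   /* Summary: the loop above yields one memory type per heap on LLC
    * platforms and two per heap on non-LLC platforms.
    */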
   device->memory.type_count = type_count;

   return VK_SUCCESS;
}

static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
   const struct build_id_note *note =
      build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
   if (!note) {
      return vk_errorf(device->instance, device,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to find build-id");
   }

   unsigned build_id_len = build_id_length(note);
   if (build_id_len < 20) {
      return vk_errorf(device->instance, device,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "build-id too short.  It needs to be a SHA");
   }

   struct mesa_sha1 sha1_ctx;
   uint8_t sha1[20];
   STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));
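   /* VK_UUID_SIZE is 16 and a SHA-1 digest is 20 bytes, so each UUID below is
    * just the first 16 bytes of the corresponding digest.
    */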

   /* The pipeline cache UUID is used for determining when a pipeline cache is
    * invalid.  It needs both a driver build and the PCI ID of the device.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
   _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
                     sizeof(device->chipset_id));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);

   /* The driver UUID is used for determining sharability of images and memory
    * between two Vulkan instances in separate processes.  People who want to
    * share memory need to also check the device UUID (below) so all this
    * needs to be is the build-id.
    */
   memcpy(device->driver_uuid, build_id_data(note), VK_UUID_SIZE);

   /* The device UUID uniquely identifies the given device within the machine.
    * Since we never have more than one device, this doesn't need to be a real
    * UUID.  However, on the off-chance that someone tries to use this to
    * cache pre-tiled images or something of the like, we use the PCI ID and
    * some bits of ISL info to ensure that this is safe.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
                     sizeof(device->chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->isl_dev.has_bit6_swizzling,
                     sizeof(device->isl_dev.has_bit6_swizzling));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->device_uuid, sha1, VK_UUID_SIZE);

   return VK_SUCCESS;
}

static VkResult
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,
                         const char *path)
{
   VkResult result;
   int fd;

   brw_process_intel_debug_variable();

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   if (!device->chipset_id) {
      result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
      goto fail;
   }

   device->name = gen_get_device_name(device->chipset_id);
   if (!gen_get_device_info(device->chipset_id, &device->info)) {
      result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
      goto fail;
   }

   if (device->info.is_haswell) {
      intel_logw("Haswell Vulkan support is incomplete");
   } else if (device->info.gen == 7 && !device->info.is_baytrail) {
      intel_logw("Ivy Bridge Vulkan support is incomplete");
   } else if (device->info.gen == 7 && device->info.is_baytrail) {
      intel_logw("Bay Trail Vulkan support is incomplete");
   } else if (device->info.gen >= 8 && device->info.gen <= 10) {
      /* Gen8-10 fully supported */
   } else {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INCOMPATIBLE_DRIVER,
                         "Vulkan not yet supported on %s", device->name);
      goto fail;
   }

   device->cmd_parser_version = -1;
   if (device->info.gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorf(device->instance, device,
                            VK_ERROR_INITIALIZATION_FAILED,
                            "failed to get command parser version");
         goto fail;
      }
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing execbuf2");
      goto fail;
   }

   if (!device->info.has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing wc mmap");
      goto fail;
   }

   result = anv_physical_device_init_heaps(device, fd);
   if (result != VK_SUCCESS)
      goto fail;

   device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
   device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
   device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
   device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
   device->has_syncobj_wait = device->has_syncobj &&
                              anv_gem_supports_syncobj_wait(fd);

   bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   /* Starting with Gen10, the timestamp frequency of the command streamer may
    * vary from one part to another. We can query the value from the kernel.
    */
   if (device->info.gen >= 10) {
      int timestamp_frequency =
         anv_gem_get_param(fd, I915_PARAM_CS_TIMESTAMP_FREQUENCY);

      if (timestamp_frequency < 0)
         intel_logw("Kernel 4.16-rc1+ required to properly query CS timestamp frequency");
      else
         device->info.timestamp_frequency = timestamp_frequency;
   }

   /* GENs prior to 8 do not support EU/Subslice info */
   if (device->info.gen >= 8) {
      device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
      device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);

      /* Without this information, we cannot get the right Braswell
       * brandstrings, and we have to use conservative numbers for GPGPU on
       * many platforms, but otherwise, things will just work.
       */
      if (device->subslice_total < 1 || device->eu_total < 1) {
         intel_logw("Kernel 4.1 required to properly query GPU properties");
      }
   } else if (device->info.gen == 7) {
      device->subslice_total = 1 << (device->info.gt - 1);
   }

   if (device->info.is_cherryview &&
       device->subslice_total > 0 && device->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * num threads per EU */
      uint32_t max_cs_threads =
         device->eu_total / device->subslice_total * device->info.num_thread_per_eu;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > device->info.max_cs_threads)
         device->info.max_cs_threads = max_cs_threads;
   }

   device->compiler = brw_compiler_create(NULL, &device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;
   device->compiler->supports_pull_constants = false;
   device->compiler->constant_buffer_0_is_relative = true;

   isl_device_init(&device->isl_dev, &device->info, swizzled);

   result = anv_physical_device_init_uuids(device);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_init_wsi(device);
   if (result != VK_SUCCESS) {
      ralloc_free(device->compiler);
      goto fail;
   }

   anv_physical_device_get_supported_extensions(device,
                                                &device->supported_extensions);

   device->local_fd = fd;
   return VK_SUCCESS;

fail:
   close(fd);
   return result;
}

static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   ralloc_free(device->compiler);
   close(device->local_fd);
}

static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
      if (anv_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = anv_instance_extensions[i];
         }
      }
   }

   return vk_outarray_status(&out);
}

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   /* Check if user passed a debug report callback to be used during
    * Create/Destroy of instance.
    */
   const VkDebugReportCallbackCreateInfoEXT *ctor_cb =
      vk_find_struct_const(pCreateInfo->pNext,
                           DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      client_version = VK_MAKE_VERSION(1, 0, 0);
   }

   if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
       client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {

      if (ctor_cb && ctor_cb->flags & VK_DEBUG_REPORT_ERROR_BIT_EXT)
         ctor_cb->pfnCallback(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                              VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                              VK_NULL_HANDLE, /* No handle available yet. */
                              __LINE__,
                              0,
                              "anv",
                              "incompatible driver version",
                              ctor_cb->pUserData);

      return vk_errorf(NULL, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "Client requested version %d.%d.%d",
                       VK_VERSION_MAJOR(client_version),
                       VK_VERSION_MINOR(client_version),
                       VK_VERSION_PATCH(client_version));
   }

   struct anv_instance_extension_table enabled_extensions = {};
   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    anv_instance_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      if (!anv_instance_extensions_supported.extensions[idx])
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      enabled_extensions.extensions[idx] = true;
   }

   instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->apiVersion = client_version;
   instance->enabled_extensions = enabled_extensions;

   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_entrypoint_is_enabled(i, instance->apiVersion,
                                     &instance->enabled_extensions, NULL)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else if (anv_dispatch_table.entrypoints[i] != NULL) {
         instance->dispatch.entrypoints[i] = anv_dispatch_table.entrypoints[i];
      } else {
         instance->dispatch.entrypoints[i] =
            anv_tramp_dispatch_table.entrypoints[i];
      }
   }

   instance->physicalDeviceCount = -1;

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance                                  _instance,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (!instance)
      return;

   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      anv_physical_device_finish(&instance->physicalDevice);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   _mesa_locale_fini();

   vk_free(&instance->alloc, instance);
}

static VkResult
anv_enumerate_devices(struct anv_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physicalDeviceCount = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
   if (max_devices < 1)
      return VK_ERROR_INCOMPATIBLE_DRIVER;

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PCI &&
          devices[i]->deviceinfo.pci->vendor_id == 0x8086) {

         result = anv_physical_device_init(&instance->physicalDevice,
                        instance,
                        devices[i]->nodes[DRM_NODE_RENDER]);
         if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   if (result == VK_SUCCESS)
      instance->physicalDeviceCount = 1;

   return result;
}


VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
   VkResult result;

   if (instance->physicalDeviceCount < 0) {
      result = anv_enumerate_devices(instance);
      if (result != VK_SUCCESS &&
          result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   if (instance->physicalDeviceCount > 0) {
      assert(instance->physicalDeviceCount == 1);
      vk_outarray_append(&out, i) {
         *i = anv_physical_device_to_handle(&instance->physicalDevice);
      }
   }

   return vk_outarray_status(&out);
}

void anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess                       = true,
      .fullDrawIndexUint32                      = true,
      .imageCubeArray                           = true,
      .independentBlend                         = true,
      .geometryShader                           = true,
      .tessellationShader                       = true,
      .sampleRateShading                        = true,
      .dualSrcBlend                             = true,
      .logicOp                                  = true,
      .multiDrawIndirect                        = true,
      .drawIndirectFirstInstance                = true,
      .depthClamp                               = true,
      .depthBiasClamp                           = true,
      .fillModeNonSolid                         = true,
      .depthBounds                              = false,
      .wideLines                                = true,
      .largePoints                              = true,
      .alphaToOne                               = true,
      .multiViewport                            = true,
      .samplerAnisotropy                        = true,
      .textureCompressionETC2                   = pdevice->info.gen >= 8 ||
                                                  pdevice->info.is_baytrail,
      .textureCompressionASTC_LDR               = pdevice->info.gen >= 9, /* FINISHME CHV */
      .textureCompressionBC                     = true,
      .occlusionQueryPrecise                    = true,
      .pipelineStatisticsQuery                  = true,
      .fragmentStoresAndAtomics                 = true,
      .shaderTessellationAndGeometryPointSize   = true,
      .shaderImageGatherExtended                = true,
      .shaderStorageImageExtendedFormats        = true,
      .shaderStorageImageMultisample            = false,
      .shaderStorageImageReadWithoutFormat      = false,
      .shaderStorageImageWriteWithoutFormat     = true,
      .shaderUniformBufferArrayDynamicIndexing  = true,
      .shaderSampledImageArrayDynamicIndexing   = true,
      .shaderStorageBufferArrayDynamicIndexing  = true,
      .shaderStorageImageArrayDynamicIndexing   = true,
      .shaderClipDistance                       = true,
      .shaderCullDistance                       = true,
      .shaderFloat64                            = pdevice->info.gen >= 8,
      .shaderInt64                              = pdevice->info.gen >= 8,
      .shaderInt16                              = false,
      .shaderResourceMinLod                     = false,
      .variableMultisampleRate                  = false,
      .inheritedQueries                         = true,
   };

   /* We can't do image stores in vec4 shaders */
   pFeatures->vertexPipelineStoresAndAtomics =
      pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
      pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
}

void anv_GetPhysicalDeviceFeatures2KHR(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures2KHR*               pFeatures)
{
   anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);

   vk_foreach_struct(ext, pFeatures->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHX: {
         VkPhysicalDeviceMultiviewFeaturesKHX *features =
            (VkPhysicalDeviceMultiviewFeaturesKHX *)ext;
         features->multiview = true;
         features->multiviewGeometryShader = true;
         features->multiviewTessellationShader = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = true;
         features->variablePointers = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR: {
         VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR *) ext;
         features->samplerYcbcrConversion = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR: {
         VkPhysicalDevice16BitStorageFeaturesKHR *features =
            (VkPhysicalDevice16BitStorageFeaturesKHR *)ext;

         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

void anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct gen_device_info *devinfo = &pdevice->info;

   /* See assertions made when programming the buffer surface state. */
   const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
                                      (1ul << 30) : (1ul << 27);

   const uint32_t max_samplers = (devinfo->gen >= 8 || devinfo->is_haswell) ?
                                 128 : 16;

   VkSampleCountFlags sample_counts =
      isl_device_get_sample_counts(&pdevice->isl_dev);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D                      = (1 << 14),
      .maxImageDimension2D                      = (1 << 14),
      .maxImageDimension3D                      = (1 << 11),
      .maxImageDimensionCube                    = (1 << 14),
      .maxImageArrayLayers                      = (1 << 11),
      .maxTexelBufferElements                   = 128 * 1024 * 1024,
      .maxUniformBufferRange                    = (1ul << 27),
      .maxStorageBufferRange                    = max_raw_buffer_sz,
      .maxPushConstantsSize                     = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount                 = UINT32_MAX,
      .maxSamplerAllocationCount                = 64 * 1024,
      .bufferImageGranularity                   = 64, /* A cache line */
      .sparseAddressSpaceSize                   = 0,
      .maxBoundDescriptorSets                   = MAX_SETS,
      .maxPerStageDescriptorSamplers            = max_samplers,
      .maxPerStageDescriptorUniformBuffers      = 64,
      .maxPerStageDescriptorStorageBuffers      = 64,
      .maxPerStageDescriptorSampledImages       = max_samplers,
      .maxPerStageDescriptorStorageImages       = 64,
      .maxPerStageDescriptorInputAttachments    = 64,
      .maxPerStageResources                     = 250,
      .maxDescriptorSetSamplers                 = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
      .maxDescriptorSetUniformBuffers           = 6 * 64,           /* number of stages * maxPerStageDescriptorUniformBuffers */
      .maxDescriptorSetUniformBuffersDynamic    = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetStorageBuffers           = 6 * 64,           /* number of stages * maxPerStageDescriptorStorageBuffers */
      .maxDescriptorSetStorageBuffersDynamic    = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetSampledImages            = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSampledImages */
      .maxDescriptorSetStorageImages            = 6 * 64,           /* number of stages * maxPerStageDescriptorStorageImages */
      .maxDescriptorSetInputAttachments         = 256,
      .maxVertexInputAttributes                 = MAX_VBS,
      .maxVertexInputBindings                   = MAX_VBS,
      .maxVertexInputAttributeOffset            = 2047,
      .maxVertexInputBindingStride              = 2048,
      .maxVertexOutputComponents                = 128,
      .maxTessellationGenerationLevel           = 64,
      .maxTessellationPatchSize                 = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 128,
      .maxTessellationControlTotalOutputComponents = 2048,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations             = 32,
      .maxGeometryInputComponents               = 64,
      .maxGeometryOutputComponents              = 128,
      .maxGeometryOutputVertices                = 256,
      .maxGeometryTotalOutputComponents         = 1024,
      .maxFragmentInputComponents               = 128,
      .maxFragmentOutputAttachments             = 8,
      .maxFragmentDualSrcAttachments            = 1,
      .maxFragmentCombinedOutputResources       = 8,
      .maxComputeSharedMemorySize               = 32768,
      .maxComputeWorkGroupCount                 = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations           = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits                    = 4 /* FIXME */,
      .subTexelPrecisionBits                    = 4 /* FIXME */,
      .mipmapPrecisionBits                      = 4 /* FIXME */,
      .maxDrawIndexedIndexValue                 = UINT32_MAX,
      .maxDrawIndirectCount                     = UINT32_MAX,
      .maxSamplerLodBias                        = 16,
      .maxSamplerAnisotropy                     = 16,
      .maxViewports                             = MAX_VIEWPORTS,
      .maxViewportDimensions                    = { (1 << 14), (1 << 14) },
      .viewportBoundsRange                      = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits                     = 13, /* We take a float? */
      .minMemoryMapAlignment                    = 4096, /* A page */
      .minTexelBufferOffsetAlignment            = 1,
      /* We need 16 for UBO block reads to work and 32 for push UBOs */
      .minUniformBufferOffsetAlignment          = 32,
      .minStorageBufferOffsetAlignment          = 4,
      .minTexelOffset                           = -8,
      .maxTexelOffset                           = 7,
      .minTexelGatherOffset                     = -32,
      .maxTexelGatherOffset                     = 31,
      .minInterpolationOffset                   = -0.5,
      .maxInterpolationOffset                   = 0.4375,
      .subPixelInterpolationOffsetBits          = 4,
      .maxFramebufferWidth                      = (1 << 14),
      .maxFramebufferHeight                     = (1 << 14),
      .maxFramebufferLayers                     = (1 << 11),
      .framebufferColorSampleCounts             = sample_counts,
      .framebufferDepthSampleCounts             = sample_counts,
      .framebufferStencilSampleCounts           = sample_counts,
      .framebufferNoAttachmentsSampleCounts     = sample_counts,
      .maxColorAttachments                      = MAX_RTS,
      .sampledImageColorSampleCounts            = sample_counts,
      .sampledImageIntegerSampleCounts          = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts            = sample_counts,
      .sampledImageStencilSampleCounts          = sample_counts,
      .storageImageSampleCounts                 = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords                       = 1,
      .timestampComputeAndGraphics              = false,
      .timestampPeriod                          = 1000000000.0 / devinfo->timestamp_frequency,
      .maxClipDistances                         = 8,
      .maxCullDistances                         = 8,
      .maxCombinedClipAndCullDistances          = 8,
      .discreteQueuePriorities                  = 1,
      .pointSizeRange                           = { 0.125, 255.875 },
      .lineWidthRange                           = { 0.0, 7.9921875 },
      .pointSizeGranularity                     = (1.0 / 8.0),
      .lineWidthGranularity                     = (1.0 / 128.0),
      .strictLines                              = false, /* FINISHME */
      .standardSampleLocations                  = true,
      .optimalBufferCopyOffsetAlignment         = 128,
      .optimalBufferCopyRowPitchAlignment       = 128,
      .nonCoherentAtomSize                      = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = anv_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0x8086,
      .deviceID = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
            "%s", pdevice->name);
   memcpy(pProperties->pipelineCacheUUID,
          pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
}

void anv_GetPhysicalDeviceProperties2KHR(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties2KHR*             pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;

         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *id_props =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(id_props->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         memcpy(id_props->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         /* The LUID is for Windows. */
         id_props->deviceLUIDValid = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHX: {
         VkPhysicalDeviceMultiviewPropertiesKHX *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHX *)ext;
         properties->maxMultiviewViewCount = 16;
         properties->maxMultiviewInstanceIndex = UINT32_MAX / 16;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
         properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         anv_finishme("Implement pop-free point clipping");
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

/* We support exactly one queue family. */
static const VkQueueFamilyProperties
anv_queue_family_properties = {
   .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                 VK_QUEUE_COMPUTE_BIT |
                 VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 36, /* XXX: Real value here */
   .minImageTransferGranularity = { 1, 1, 1 },
};

void anv_GetPhysicalDeviceQueueFamilyProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pCount,
    VkQueueFamilyProperties*                    pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);

   vk_outarray_append(&out, p) {
      *p = anv_queue_family_properties;
   }
}

void anv_GetPhysicalDeviceQueueFamilyProperties2KHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pQueueFamilyPropertyCount,
    VkQueueFamilyProperties2KHR*                pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = anv_queue_family_properties;

      vk_foreach_struct(s, p->pNext) {
         anv_debug_ignored_stype(s->sType);
      }
   }
}

void anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);

   pMemoryProperties->memoryTypeCount = physical_device->memory.type_count;
   for (uint32_t i = 0; i < physical_device->memory.type_count; i++) {
      pMemoryProperties->memoryTypes[i] = (VkMemoryType) {
         .propertyFlags = physical_device->memory.types[i].propertyFlags,
         .heapIndex     = physical_device->memory.types[i].heapIndex,
      };
   }

   pMemoryProperties->memoryHeapCount = physical_device->memory.heap_count;
   for (uint32_t i = 0; i < physical_device->memory.heap_count; i++) {
      pMemoryProperties->memoryHeaps[i] = (VkMemoryHeap) {
         .size    = physical_device->memory.heaps[i].size,
         .flags   = physical_device->memory.heaps[i].flags,
      };
   }
}

void anv_GetPhysicalDeviceMemoryProperties2KHR(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceMemoryProperties2KHR*       pMemoryProperties)
{
   anv_GetPhysicalDeviceMemoryProperties(physicalDevice,
                                         &pMemoryProperties->memoryProperties);

   vk_foreach_struct(ext, pMemoryProperties->pNext) {
      switch (ext->sType) {
      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance                                  _instance,
    const char*                                 pName)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
    * when we have to return valid function pointers, NULL, or it's left
    * undefined.  See the table for exact details.
    */
   if (pName == NULL)
      return NULL;

#define LOOKUP_ANV_ENTRYPOINT(entrypoint) \
   if (strcmp(pName, "vk" #entrypoint) == 0) \
      return (PFN_vkVoidFunction)anv_##entrypoint

   LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
   LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceLayerProperties);
   LOOKUP_ANV_ENTRYPOINT(CreateInstance);

#undef LOOKUP_ANV_ENTRYPOINT

   if (instance == NULL)
      return NULL;

   int idx = anv_get_entrypoint_index(pName);
   if (idx < 0)
      return NULL;

   return instance->dispatch.entrypoints[idx];
}

/* With version 1+ of the loader interface the ICD should expose
 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName)
{
   return anv_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction anv_GetDeviceProcAddr(
    VkDevice                                    _device,
    const char*                                 pName)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (!device || !pName)
      return NULL;

   int idx = anv_get_entrypoint_index(pName);
   if (idx < 0)
      return NULL;

   return device->dispatch.entrypoints[idx];
}

VkResult
anv_CreateDebugReportCallbackEXT(VkInstance _instance,
                                 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
                                 const VkAllocationCallbacks* pAllocator,
                                 VkDebugReportCallbackEXT* pCallback)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator, &instance->alloc,
                                          pCallback);
}

void
anv_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                  VkDebugReportCallbackEXT _callback,
                                  const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
anv_DebugReportMessageEXT(VkInstance _instance,
                          VkDebugReportFlagsEXT flags,
                          VkDebugReportObjectTypeEXT objectType,
                          uint64_t object,
                          size_t location,
                          int32_t messageCode,
                          const char* pLayerPrefix,
                          const char* pMessage)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

static void
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;
}

static void
anv_queue_finish(struct anv_queue *queue)
{
}

static struct anv_state
anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
{
   struct anv_state state;

   state = anv_state_pool_alloc(pool, size, align);
   memcpy(state.map, p, size);

   anv_state_flush(pool->block_pool.device, state);

   return state;
}

struct gen8_border_color {
   union {
      float float32[4];
      uint32_t uint32[4];
   };
   /* Pad out to 64 bytes */
   uint32_t _pad[12];
};

static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const struct gen8_border_color border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] =  { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =       { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =       { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =    { .uint32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =         { .uint32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =         { .uint32 = { 1, 1, 1, 1 } },
   };

   device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
                                                    sizeof(border_colors), 64,
                                                    border_colors);
}

static void
anv_device_init_trivial_batch(struct anv_device *device)
{
   anv_bo_init_new(&device->trivial_batch_bo, device, 4096);

   if (device->instance->physicalDevice.has_exec_async)
      device->trivial_batch_bo.flags |= EXEC_OBJECT_ASYNC;

   void *map = anv_gem_mmap(device, device->trivial_batch_bo.gem_handle,
                            0, 4096, 0);

   struct anv_batch batch = {
      .start = map,
      .next = map,
      .end = map + 4096,
   };

   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

   if (!device->info.has_llc)
      gen_clflush_range(map, batch.next - map);

   anv_gem_munmap(map, device->trivial_batch_bo.size);
}

anv_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,const char * pLayerName,uint32_t * pPropertyCount,VkExtensionProperties * pProperties)1258 VkResult anv_EnumerateDeviceExtensionProperties(
1259     VkPhysicalDevice                            physicalDevice,
1260     const char*                                 pLayerName,
1261     uint32_t*                                   pPropertyCount,
1262     VkExtensionProperties*                      pProperties)
1263 {
1264    ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
1265    VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1266    (void)device;
1267 
1268    for (int i = 0; i < ANV_DEVICE_EXTENSION_COUNT; i++) {
1269       if (device->supported_extensions.extensions[i]) {
1270          vk_outarray_append(&out, prop) {
1271             *prop = anv_device_extensions[i];
1272          }
1273       }
1274    }
1275 
1276    return vk_outarray_status(&out);
1277 }
1278 
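/* Fill the device dispatch table: entrypoints for extensions that were not
 * enabled are NULLed out, gen-specific implementations take priority, and
 * everything else falls back to the generic anv dispatch table.
 */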
1279 static void
1280 anv_device_init_dispatch(struct anv_device *device)
1281 {
1282    const struct anv_dispatch_table *genX_table;
1283    switch (device->info.gen) {
1284    case 10:
1285       genX_table = &gen10_dispatch_table;
1286       break;
1287    case 9:
1288       genX_table = &gen9_dispatch_table;
1289       break;
1290    case 8:
1291       genX_table = &gen8_dispatch_table;
1292       break;
1293    case 7:
1294       if (device->info.is_haswell)
1295          genX_table = &gen75_dispatch_table;
1296       else
1297          genX_table = &gen7_dispatch_table;
1298       break;
1299    default:
1300       unreachable("unsupported gen\n");
1301    }
1302 
1303    for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
1304       /* Vulkan requires that entrypoints for extensions which have not been
1305        * enabled must not be advertised.
1306        */
1307       if (!anv_entrypoint_is_enabled(i, device->instance->apiVersion,
1308                                      &device->instance->enabled_extensions,
1309                                      &device->enabled_extensions)) {
1310          device->dispatch.entrypoints[i] = NULL;
1311       } else if (genX_table->entrypoints[i]) {
1312          device->dispatch.entrypoints[i] = genX_table->entrypoints[i];
1313       } else {
1314          device->dispatch.entrypoints[i] = anv_dispatch_table.entrypoints[i];
1315       }
1316    }
1317 }
1318 
1319 VkResult anv_CreateDevice(
1320     VkPhysicalDevice                            physicalDevice,
1321     const VkDeviceCreateInfo*                   pCreateInfo,
1322     const VkAllocationCallbacks*                pAllocator,
1323     VkDevice*                                   pDevice)
1324 {
1325    ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
1326    VkResult result;
1327    struct anv_device *device;
1328 
1329    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
1330 
1331    struct anv_device_extension_table enabled_extensions = { };
1332    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1333       int idx;
1334       for (idx = 0; idx < ANV_DEVICE_EXTENSION_COUNT; idx++) {
1335          if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
1336                     anv_device_extensions[idx].extensionName) == 0)
1337             break;
1338       }
1339 
1340       if (idx >= ANV_DEVICE_EXTENSION_COUNT)
1341          return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
1342 
1343       if (!physical_device->supported_extensions.extensions[idx])
1344          return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
1345 
1346       enabled_extensions.extensions[idx] = true;
1347    }
1348 
1349    /* Check enabled features */
1350    if (pCreateInfo->pEnabledFeatures) {
1351       VkPhysicalDeviceFeatures supported_features;
1352       anv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1353       VkBool32 *supported_feature = (VkBool32 *)&supported_features;
1354       VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
1355       unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1356       for (uint32_t i = 0; i < num_features; i++) {
1357          if (enabled_feature[i] && !supported_feature[i])
1358             return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
1359       }
1360    }
1361 
1362    device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
1363                        sizeof(*device), 8,
1364                        VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1365    if (!device)
1366       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1367 
1368    device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1369    device->instance = physical_device->instance;
1370    device->chipset_id = physical_device->chipset_id;
1371    device->lost = false;
1372 
1373    if (pAllocator)
1374       device->alloc = *pAllocator;
1375    else
1376       device->alloc = physical_device->instance->alloc;
1377 
1378    /* XXX(chadv): Can we dup() physicalDevice->fd here? */
1379    device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
1380    if (device->fd == -1) {
1381       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
1382       goto fail_device;
1383    }
1384 
1385    device->context_id = anv_gem_create_context(device);
1386    if (device->context_id == -1) {
1387       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
1388       goto fail_fd;
1389    }
1390 
1391    device->info = physical_device->info;
1392    device->isl_dev = physical_device->isl_dev;
1393 
1394    /* On Broadwell and later, we can use batch chaining to more efficiently
1395     * implement growing command buffers.  Prior to Haswell, the kernel
1396     * command parser gets in the way and we have to fall back to growing
1397     * the batch.
1398     */
1399    device->can_chain_batches = device->info.gen >= 8;
1400 
1401    device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
1402       pCreateInfo->pEnabledFeatures->robustBufferAccess;
1403    device->enabled_extensions = enabled_extensions;
1404 
1405    anv_device_init_dispatch(device);
1406 
1407    if (pthread_mutex_init(&device->mutex, NULL) != 0) {
1408       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
1409       goto fail_context_id;
1410    }
1411 
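   /* Set up queue_submit with a CLOCK_MONOTONIC condition-variable attribute
    * so that timed waits on it are immune to wall-clock adjustments.
    */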
1412    pthread_condattr_t condattr;
1413    if (pthread_condattr_init(&condattr) != 0) {
1414       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
1415       goto fail_mutex;
1416    }
1417    if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
1418       pthread_condattr_destroy(&condattr);
1419       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
1420       goto fail_mutex;
1421    }
1422    if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
1423       pthread_condattr_destroy(&condattr);
1424       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
1425       goto fail_mutex;
1426    }
1427    pthread_condattr_destroy(&condattr);
1428 
1429    uint64_t bo_flags =
1430       (physical_device->supports_48bit_addresses ? EXEC_OBJECT_SUPPORTS_48B_ADDRESS : 0) |
1431       (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
1432       (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0);
1433 
1434    anv_bo_pool_init(&device->batch_bo_pool, device, bo_flags);
1435 
1436    result = anv_bo_cache_init(&device->bo_cache);
1437    if (result != VK_SUCCESS)
1438       goto fail_batch_bo_pool;
1439 
1440    /* For the state pools we explicitly disable 48bit. */
1441    bo_flags = (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
1442               (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0);
1443 
1444    result = anv_state_pool_init(&device->dynamic_state_pool, device, 16384,
1445                                 bo_flags);
1446    if (result != VK_SUCCESS)
1447       goto fail_bo_cache;
1448 
1449    result = anv_state_pool_init(&device->instruction_state_pool, device, 16384,
1450                                 bo_flags);
1451    if (result != VK_SUCCESS)
1452       goto fail_dynamic_state_pool;
1453 
1454    result = anv_state_pool_init(&device->surface_state_pool, device, 4096,
1455                                 bo_flags);
1456    if (result != VK_SUCCESS)
1457       goto fail_instruction_state_pool;
1458 
1459    result = anv_bo_init_new(&device->workaround_bo, device, 1024);
1460    if (result != VK_SUCCESS)
1461       goto fail_surface_state_pool;
1462 
1463    anv_device_init_trivial_batch(device);
1464 
1465    anv_scratch_pool_init(device, &device->scratch_pool);
1466 
1467    anv_queue_init(device, &device->queue);
1468 
1469    switch (device->info.gen) {
1470    case 7:
1471       if (!device->info.is_haswell)
1472          result = gen7_init_device_state(device);
1473       else
1474          result = gen75_init_device_state(device);
1475       break;
1476    case 8:
1477       result = gen8_init_device_state(device);
1478       break;
1479    case 9:
1480       result = gen9_init_device_state(device);
1481       break;
1482    case 10:
1483       result = gen10_init_device_state(device);
1484       break;
1485    default:
1486       /* Shouldn't get here as we don't create physical devices for any other
1487        * gens. */
1488       unreachable("unhandled gen");
1489    }
1490    if (result != VK_SUCCESS)
1491       goto fail_workaround_bo;
1492 
1493    anv_device_init_blorp(device);
1494 
1495    anv_device_init_border_colors(device);
1496 
1497    *pDevice = anv_device_to_handle(device);
1498 
1499    return VK_SUCCESS;
1500 
1501  fail_workaround_bo:
1502    anv_queue_finish(&device->queue);
1503    anv_scratch_pool_finish(device, &device->scratch_pool);
1504    anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
1505    anv_gem_close(device, device->workaround_bo.gem_handle);
1506  fail_surface_state_pool:
1507    anv_state_pool_finish(&device->surface_state_pool);
1508  fail_instruction_state_pool:
1509    anv_state_pool_finish(&device->instruction_state_pool);
1510  fail_dynamic_state_pool:
1511    anv_state_pool_finish(&device->dynamic_state_pool);
1512  fail_bo_cache:
1513    anv_bo_cache_finish(&device->bo_cache);
1514  fail_batch_bo_pool:
1515    anv_bo_pool_finish(&device->batch_bo_pool);
1516    pthread_cond_destroy(&device->queue_submit);
1517  fail_mutex:
1518    pthread_mutex_destroy(&device->mutex);
1519  fail_context_id:
1520    anv_gem_destroy_context(device, device->context_id);
1521  fail_fd:
1522    close(device->fd);
1523  fail_device:
1524    vk_free(&device->alloc, device);
1525 
1526    return result;
1527 }
1528 
1529 void anv_DestroyDevice(
1530     VkDevice                                    _device,
1531     const VkAllocationCallbacks*                pAllocator)
1532 {
1533    ANV_FROM_HANDLE(anv_device, device, _device);
1534 
1535    if (!device)
1536       return;
1537 
1538    anv_device_finish_blorp(device);
1539 
1540    anv_queue_finish(&device->queue);
1541 
1542 #ifdef HAVE_VALGRIND
1543    /* We only need to free these to prevent valgrind errors.  The backing
1544     * BO will go away in a couple of lines so we don't actually leak.
1545     */
1546    anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
1547 #endif
1548 
1549    anv_scratch_pool_finish(device, &device->scratch_pool);
1550 
1551    anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
1552    anv_gem_close(device, device->workaround_bo.gem_handle);
1553 
1554    anv_gem_close(device, device->trivial_batch_bo.gem_handle);
1555 
1556    anv_state_pool_finish(&device->surface_state_pool);
1557    anv_state_pool_finish(&device->instruction_state_pool);
1558    anv_state_pool_finish(&device->dynamic_state_pool);
1559 
1560    anv_bo_cache_finish(&device->bo_cache);
1561 
1562    anv_bo_pool_finish(&device->batch_bo_pool);
1563 
1564    pthread_cond_destroy(&device->queue_submit);
1565    pthread_mutex_destroy(&device->mutex);
1566 
1567    anv_gem_destroy_context(device, device->context_id);
1568 
1569    close(device->fd);
1570 
1571    vk_free(&device->alloc, device);
1572 }
1573 
1574 VkResult anv_EnumerateInstanceLayerProperties(
1575     uint32_t*                                   pPropertyCount,
1576     VkLayerProperties*                          pProperties)
1577 {
1578    if (pProperties == NULL) {
1579       *pPropertyCount = 0;
1580       return VK_SUCCESS;
1581    }
1582 
1583    /* None supported at this time */
1584    return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1585 }
1586 
1587 VkResult anv_EnumerateDeviceLayerProperties(
1588     VkPhysicalDevice                            physicalDevice,
1589     uint32_t*                                   pPropertyCount,
1590     VkLayerProperties*                          pProperties)
1591 {
1592    if (pProperties == NULL) {
1593       *pPropertyCount = 0;
1594       return VK_SUCCESS;
1595    }
1596 
1597    /* None supported at this time */
1598    return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1599 }
1600 
1601 void anv_GetDeviceQueue(
1602     VkDevice                                    _device,
1603     uint32_t                                    queueNodeIndex,
1604     uint32_t                                    queueIndex,
1605     VkQueue*                                    pQueue)
1606 {
1607    ANV_FROM_HANDLE(anv_device, device, _device);
1608 
1609    assert(queueIndex == 0);
1610 
1611    *pQueue = anv_queue_to_handle(&device->queue);
1612 }
1613 
1614 VkResult
1615 anv_device_query_status(struct anv_device *device)
1616 {
1617    /* This isn't likely as most of the callers of this function already check
1618     * for it.  However, it doesn't hurt to check and it potentially lets us
1619     * avoid an ioctl.
1620     */
1621    if (unlikely(device->lost))
1622       return VK_ERROR_DEVICE_LOST;
1623 
1624    uint32_t active, pending;
1625    int ret = anv_gem_gpu_get_reset_stats(device, &active, &pending);
1626    if (ret == -1) {
1627       /* We don't know the real error. */
1628       device->lost = true;
1629       return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
1630                        "get_reset_stats failed: %m");
1631    }
1632 
1633    if (active) {
1634       device->lost = true;
1635       return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
1636                        "GPU hung on one of our command buffers");
1637    } else if (pending) {
1638       device->lost = true;
1639       return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
1640                        "GPU hung with commands in-flight");
1641    }
1642 
1643    return VK_SUCCESS;
1644 }
1645 
1646 VkResult
1647 anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo)
1648 {
1649    /* Note:  This only returns whether or not the BO is in use by an i915 GPU.
1650     * Other usages of the BO (such as on different hardware) will not be
1651     * flagged as "busy" by this ioctl.  Use with care.
1652     */
1653    int ret = anv_gem_busy(device, bo->gem_handle);
1654    if (ret == 1) {
1655       return VK_NOT_READY;
1656    } else if (ret == -1) {
1657       /* We don't know the real error. */
1658       device->lost = true;
1659       return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
1660                        "gem wait failed: %m");
1661    }
1662 
1663    /* Query for device status after the busy call.  If the BO we're checking
1664     * got caught in a GPU hang we don't want to return VK_SUCCESS to the
1665     * client because it clearly doesn't have valid data.  Yes, this most
1666     * likely means an ioctl, but we just did an ioctl to query the busy status
1667     * so it's no great loss.
1668     */
1669    return anv_device_query_status(device);
1670 }
1671 
1672 VkResult
1673 anv_device_wait(struct anv_device *device, struct anv_bo *bo,
1674                 int64_t timeout)
1675 {
1676    int ret = anv_gem_wait(device, bo->gem_handle, &timeout);
1677    if (ret == -1 && errno == ETIME) {
1678       return VK_TIMEOUT;
1679    } else if (ret == -1) {
1680       /* We don't know the real error. */
1681       device->lost = true;
1682       return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
1683                        "gem wait failed: %m");
1684    }
1685 
1686    /* Query for device status after the wait.  If the BO we're waiting on got
1687     * caught in a GPU hang we don't want to return VK_SUCCESS to the client
1688     * because it clearly doesn't have valid data.  Yes, this most likely means
1689     * an ioctl, but we just did an ioctl to wait so it's no great loss.
1690     */
1691    return anv_device_query_status(device);
1692 }
1693 
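/* Wait-idle is implemented by building a minimal batch (just
 * MI_BATCH_BUFFER_END plus a NOOP) on the stack and handing it to
 * anv_device_submit_simple_batch(), which submits it and waits for it to
 * complete.
 */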
1694 VkResult anv_DeviceWaitIdle(
1695     VkDevice                                    _device)
1696 {
1697    ANV_FROM_HANDLE(anv_device, device, _device);
1698    if (unlikely(device->lost))
1699       return VK_ERROR_DEVICE_LOST;
1700 
1701    struct anv_batch batch;
1702 
1703    uint32_t cmds[8];
1704    batch.start = batch.next = cmds;
1705    batch.end = (void *) cmds + sizeof(cmds);
1706 
1707    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
1708    anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
1709 
1710    return anv_device_submit_simple_batch(device, &batch);
1711 }
1712 
1713 VkResult
1714 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
1715 {
1716    uint32_t gem_handle = anv_gem_create(device, size);
1717    if (!gem_handle)
1718       return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
1719 
1720    anv_bo_init(bo, gem_handle, size);
1721 
1722    return VK_SUCCESS;
1723 }
1724 
1725 VkResult anv_AllocateMemory(
1726     VkDevice                                    _device,
1727     const VkMemoryAllocateInfo*                 pAllocateInfo,
1728     const VkAllocationCallbacks*                pAllocator,
1729     VkDeviceMemory*                             pMem)
1730 {
1731    ANV_FROM_HANDLE(anv_device, device, _device);
1732    struct anv_physical_device *pdevice = &device->instance->physicalDevice;
1733    struct anv_device_memory *mem;
1734    VkResult result = VK_SUCCESS;
1735 
1736    assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1737 
1738    /* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
1739    assert(pAllocateInfo->allocationSize > 0);
1740 
1741    /* The kernel relocation API has a limitation of a 32-bit delta value
1742     * applied to the address before it is written which, in spite of it being
1743     * unsigned, is treated as signed.  Because of the way that this maps to
1744     * the Vulkan API, we cannot handle an offset into a buffer that does not
1745     * fit into a signed 32 bits.  The only mechanism we have for dealing with
1746     * this at the moment is to limit all VkDeviceMemory objects to a maximum
1747     * of 2GB each.  The Vulkan spec allows us to do this:
1748     *
1749     *    "Some platforms may have a limit on the maximum size of a single
1750     *    allocation. For example, certain systems may fail to create
1751     *    allocations with a size greater than or equal to 4GB. Such a limit is
1752     *    implementation-dependent, and if such a failure occurs then the error
1753     *    VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
1754     *
1755     * We don't use vk_error here because it's not an error so much as an
1756     * indication to the application that the allocation is too large.
1757     */
1758    if (pAllocateInfo->allocationSize > (1ull << 31))
1759       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1760 
1761    /* FINISHME: Fail if allocation request exceeds heap size. */
1762 
1763    mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1764                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1765    if (mem == NULL)
1766       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1767 
1768    assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
1769    mem->type = &pdevice->memory.types[pAllocateInfo->memoryTypeIndex];
1770    mem->map = NULL;
1771    mem->map_size = 0;
1772 
1773    const VkImportMemoryFdInfoKHR *fd_info =
1774       vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1775 
1776    /* The Vulkan spec permits handleType to be 0, in which case the struct is
1777     * ignored.
1778     */
1779    if (fd_info && fd_info->handleType) {
1780       /* At the moment, we support only the below handle types. */
1781       assert(fd_info->handleType ==
1782                VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
1783              fd_info->handleType ==
1784                VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1785 
1786       result = anv_bo_cache_import(device, &device->bo_cache,
1787                                    fd_info->fd, &mem->bo);
1788       if (result != VK_SUCCESS)
1789          goto fail;
1790 
1791       VkDeviceSize aligned_alloc_size =
1792          align_u64(pAllocateInfo->allocationSize, 4096);
1793 
1794       /* For security purposes, we reject importing the bo if it's smaller
1795        * than the requested allocation size.  This prevents a malicious client
1796        * from passing a buffer to a trusted client, lying about the size, and
1797        * telling the trusted client to try and texture from an image that goes
1798        * out-of-bounds.  This sort of thing could lead to GPU hangs or worse
1799        * in the trusted client.  The trusted client can protect itself against
1800        * this sort of attack but only if it can trust the buffer size.
1801        */
1802       if (mem->bo->size < aligned_alloc_size) {
1803          result = vk_errorf(device->instance, device,
1804                             VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
1805                             "aligned allocationSize too large for "
1806                             "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: "
1807                             "%"PRIu64"B > %"PRIu64"B",
1808                             aligned_alloc_size, mem->bo->size);
1809          anv_bo_cache_release(device, &device->bo_cache, mem->bo);
1810          goto fail;
1811       }
1812 
1813       /* From the Vulkan spec:
1814        *
1815        *    "Importing memory from a file descriptor transfers ownership of
1816        *    the file descriptor from the application to the Vulkan
1817        *    implementation. The application must not perform any operations on
1818        *    the file descriptor after a successful import."
1819        *
1820        * If the import fails, we leave the file descriptor open.
1821        */
1822       close(fd_info->fd);
1823    } else {
1824       result = anv_bo_cache_alloc(device, &device->bo_cache,
1825                                   pAllocateInfo->allocationSize,
1826                                   &mem->bo);
1827       if (result != VK_SUCCESS)
1828          goto fail;
1829 
1830       const VkMemoryDedicatedAllocateInfoKHR *dedicated_info =
1831          vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
1832       if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
1833          ANV_FROM_HANDLE(anv_image, image, dedicated_info->image);
1834 
1835          /* For images using modifiers, we require a dedicated allocation
1836           * and we set the BO tiling to match the tiling of the underlying
1837           * modifier.  This is a bit unfortunate as this is completely
1838           * pointless for Vulkan.  However, GL needs to be able to map things
1839           * so it needs the tiling to be set.  The only way to do this in a
1840           * non-racy way is to set the tiling in the creator of the BO so that
1841           * makes it our job.
1842           *
1843           * One of these days, once the GL driver learns to not map things
1844           * through the GTT in random places, we can drop this and start
1845           * allowing multiple modified images in the same BO.
1846           */
1847          if (image->drm_format_mod != DRM_FORMAT_MOD_INVALID) {
1848             assert(isl_drm_modifier_get_info(image->drm_format_mod)->tiling ==
1849                    image->planes[0].surface.isl.tiling);
1850             const uint32_t i915_tiling =
1851                isl_tiling_to_i915_tiling(image->planes[0].surface.isl.tiling);
1852             int ret = anv_gem_set_tiling(device, mem->bo->gem_handle,
1853                                          image->planes[0].surface.isl.row_pitch,
1854                                          i915_tiling);
1855             if (ret) {
1856                anv_bo_cache_release(device, &device->bo_cache, mem->bo);
1857                return vk_errorf(device->instance, NULL,
1858                                 VK_ERROR_OUT_OF_DEVICE_MEMORY,
1859                                 "failed to set BO tiling: %m");
1860             }
1861          }
1862       }
1863    }
1864 
1865    assert(mem->type->heapIndex < pdevice->memory.heap_count);
1866    if (pdevice->memory.heaps[mem->type->heapIndex].supports_48bit_addresses)
1867       mem->bo->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
1868 
1869    const struct wsi_memory_allocate_info *wsi_info =
1870       vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
1871    if (wsi_info && wsi_info->implicit_sync) {
1872       /* We need to set the WRITE flag on window system buffers so that GEM
1873        * will know we're writing to them and synchronize uses on other rings
1874        * (eg if the display server uses the blitter ring).
1875        */
1876       mem->bo->flags |= EXEC_OBJECT_WRITE;
1877    } else if (pdevice->has_exec_async) {
1878       mem->bo->flags |= EXEC_OBJECT_ASYNC;
1879    }
1880 
1881    *pMem = anv_device_memory_to_handle(mem);
1882 
1883    return VK_SUCCESS;
1884 
1885  fail:
1886    vk_free2(&device->alloc, pAllocator, mem);
1887 
1888    return result;
1889 }
1890 
1891 VkResult anv_GetMemoryFdKHR(
1892     VkDevice                                    device_h,
1893     const VkMemoryGetFdInfoKHR*                 pGetFdInfo,
1894     int*                                        pFd)
1895 {
1896    ANV_FROM_HANDLE(anv_device, dev, device_h);
1897    ANV_FROM_HANDLE(anv_device_memory, mem, pGetFdInfo->memory);
1898 
1899    assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
1900 
1901    assert(pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
1902           pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1903 
1904    return anv_bo_cache_export(dev, &dev->bo_cache, mem->bo, pFd);
1905 }
1906 
1907 VkResult anv_GetMemoryFdPropertiesKHR(
1908     VkDevice                                    _device,
1909     VkExternalMemoryHandleTypeFlagBitsKHR       handleType,
1910     int                                         fd,
1911     VkMemoryFdPropertiesKHR*                    pMemoryFdProperties)
1912 {
1913    ANV_FROM_HANDLE(anv_device, device, _device);
1914    struct anv_physical_device *pdevice = &device->instance->physicalDevice;
1915 
1916    switch (handleType) {
1917    case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
1918       /* dma-buf can be imported as any memory type */
1919       pMemoryFdProperties->memoryTypeBits =
1920          (1 << pdevice->memory.type_count) - 1;
1921       return VK_SUCCESS;
1922 
1923    default:
1924       /* The valid usage section for this function says:
1925        *
1926        *    "handleType must not be one of the handle types defined as
1927        *    opaque."
1928        *
1929        * So opaque handle types fall into the default "unsupported" case.
1930        */
1931       return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
1932    }
1933 }
1934 
1935 void anv_FreeMemory(
1936     VkDevice                                    _device,
1937     VkDeviceMemory                              _mem,
1938     const VkAllocationCallbacks*                pAllocator)
1939 {
1940    ANV_FROM_HANDLE(anv_device, device, _device);
1941    ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1942 
1943    if (mem == NULL)
1944       return;
1945 
1946    if (mem->map)
1947       anv_UnmapMemory(_device, _mem);
1948 
1949    anv_bo_cache_release(device, &device->bo_cache, mem->bo);
1950 
1951    vk_free2(&device->alloc, pAllocator, mem);
1952 }
1953 
1954 VkResult anv_MapMemory(
1955     VkDevice                                    _device,
1956     VkDeviceMemory                              _memory,
1957     VkDeviceSize                                offset,
1958     VkDeviceSize                                size,
1959     VkMemoryMapFlags                            flags,
1960     void**                                      ppData)
1961 {
1962    ANV_FROM_HANDLE(anv_device, device, _device);
1963    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
1964 
1965    if (mem == NULL) {
1966       *ppData = NULL;
1967       return VK_SUCCESS;
1968    }
1969 
1970    if (size == VK_WHOLE_SIZE)
1971       size = mem->bo->size - offset;
1972 
1973    /* From the Vulkan spec version 1.0.32 docs for MapMemory:
1974     *
1975     *  * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
1977     *  * If size is not equal to VK_WHOLE_SIZE, size must be less than or
1978     *    equal to the size of the memory minus offset
1979     */
1980    assert(size > 0);
1981    assert(offset + size <= mem->bo->size);
1982 
1983    /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
1984     * takes a VkDeviceMemory pointer, it seems like only one map of the memory
1985     * at a time is valid. We could just mmap up front and return an offset
1986     * pointer here, but that may exhaust virtual memory on 32 bit
1987     * userspace. */
1988 
1989    uint32_t gem_flags = 0;
1990 
1991    if (!device->info.has_llc &&
1992        (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
1993       gem_flags |= I915_MMAP_WC;
1994 
1995    /* GEM will fail to map if the offset isn't 4k-aligned.  Round down. */
1996    uint64_t map_offset = offset & ~4095ull;
1997    assert(offset >= map_offset);
1998    uint64_t map_size = (offset + size) - map_offset;
1999 
2000    /* Let's map whole pages */
2001    map_size = align_u64(map_size, 4096);
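   /* Worked example with hypothetical values: offset = 5000, size = 100 gives
    * map_offset = 4096 and map_size = 1004, rounded up to 4096; *ppData below
    * then points at map + 904.
    */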
2002 
2003    void *map = anv_gem_mmap(device, mem->bo->gem_handle,
2004                             map_offset, map_size, gem_flags);
2005    if (map == MAP_FAILED)
2006       return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
2007 
2008    mem->map = map;
2009    mem->map_size = map_size;
2010 
2011    *ppData = mem->map + (offset - map_offset);
2012 
2013    return VK_SUCCESS;
2014 }
2015 
2016 void anv_UnmapMemory(
2017     VkDevice                                    _device,
2018     VkDeviceMemory                              _memory)
2019 {
2020    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
2021 
2022    if (mem == NULL)
2023       return;
2024 
2025    anv_gem_munmap(mem->map, mem->map_size);
2026 
2027    mem->map = NULL;
2028    mem->map_size = 0;
2029 }
2030 
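/* Flush each mapped range out of the CPU caches, clamped to the mapped
 * window; ranges that start beyond the current map are skipped entirely.
 */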
2031 static void
2032 clflush_mapped_ranges(struct anv_device         *device,
2033                       uint32_t                   count,
2034                       const VkMappedMemoryRange *ranges)
2035 {
2036    for (uint32_t i = 0; i < count; i++) {
2037       ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
2038       if (ranges[i].offset >= mem->map_size)
2039          continue;
2040 
2041       gen_clflush_range(mem->map + ranges[i].offset,
2042                         MIN2(ranges[i].size, mem->map_size - ranges[i].offset));
2043    }
2044 }
2045 
2046 VkResult anv_FlushMappedMemoryRanges(
2047     VkDevice                                    _device,
2048     uint32_t                                    memoryRangeCount,
2049     const VkMappedMemoryRange*                  pMemoryRanges)
2050 {
2051    ANV_FROM_HANDLE(anv_device, device, _device);
2052 
2053    if (device->info.has_llc)
2054       return VK_SUCCESS;
2055 
2056    /* Make sure the writes we're flushing have landed. */
2057    __builtin_ia32_mfence();
2058 
2059    clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
2060 
2061    return VK_SUCCESS;
2062 }
2063 
2064 VkResult anv_InvalidateMappedMemoryRanges(
2065     VkDevice                                    _device,
2066     uint32_t                                    memoryRangeCount,
2067     const VkMappedMemoryRange*                  pMemoryRanges)
2068 {
2069    ANV_FROM_HANDLE(anv_device, device, _device);
2070 
2071    if (device->info.has_llc)
2072       return VK_SUCCESS;
2073 
2074    clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
2075 
2076    /* Make sure no reads get moved up above the invalidate. */
2077    __builtin_ia32_mfence();
2078 
2079    return VK_SUCCESS;
2080 }
2081 
2082 void anv_GetBufferMemoryRequirements(
2083     VkDevice                                    _device,
2084     VkBuffer                                    _buffer,
2085     VkMemoryRequirements*                       pMemoryRequirements)
2086 {
2087    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
2088    ANV_FROM_HANDLE(anv_device, device, _device);
2089    struct anv_physical_device *pdevice = &device->instance->physicalDevice;
2090 
2091    /* The Vulkan spec (git aaed022) says:
2092     *
2093     *    memoryTypeBits is a bitfield and contains one bit set for every
2094     *    supported memory type for the resource. The bit `1<<i` is set if and
2095     *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
2096     *    structure for the physical device is supported.
2097     */
2098    uint32_t memory_types = 0;
2099    for (uint32_t i = 0; i < pdevice->memory.type_count; i++) {
2100       uint32_t valid_usage = pdevice->memory.types[i].valid_buffer_usage;
2101       if ((valid_usage & buffer->usage) == buffer->usage)
2102          memory_types |= (1u << i);
2103    }
2104 
2105    /* Base alignment requirement of a cache line */
2106    uint32_t alignment = 16;
2107 
2108    /* We need an alignment of 32 for pushing UBOs */
2109    if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
2110       alignment = MAX2(alignment, 32);
2111 
2112    pMemoryRequirements->size = buffer->size;
2113    pMemoryRequirements->alignment = alignment;
2114    pMemoryRequirements->memoryTypeBits = memory_types;
2115 }
2116 
2117 void anv_GetBufferMemoryRequirements2KHR(
2118     VkDevice                                    _device,
2119     const VkBufferMemoryRequirementsInfo2KHR*   pInfo,
2120     VkMemoryRequirements2KHR*                   pMemoryRequirements)
2121 {
2122    anv_GetBufferMemoryRequirements(_device, pInfo->buffer,
2123                                    &pMemoryRequirements->memoryRequirements);
2124 
2125    vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2126       switch (ext->sType) {
2127       case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2128          VkMemoryDedicatedRequirementsKHR *requirements = (void *)ext;
2129          requirements->prefersDedicatedAllocation = VK_FALSE;
2130          requirements->requiresDedicatedAllocation = VK_FALSE;
2131          break;
2132       }
2133 
2134       default:
2135          anv_debug_ignored_stype(ext->sType);
2136          break;
2137       }
2138    }
2139 }
2140 
2141 void anv_GetImageMemoryRequirements(
2142     VkDevice                                    _device,
2143     VkImage                                     _image,
2144     VkMemoryRequirements*                       pMemoryRequirements)
2145 {
2146    ANV_FROM_HANDLE(anv_image, image, _image);
2147    ANV_FROM_HANDLE(anv_device, device, _device);
2148    struct anv_physical_device *pdevice = &device->instance->physicalDevice;
2149 
2150    /* The Vulkan spec (git aaed022) says:
2151     *
2152     *    memoryTypeBits is a bitfield and contains one bit set for every
2153     *    supported memory type for the resource. The bit `1<<i` is set if and
2154     *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
2155     *    structure for the physical device is supported.
2156     *
2157     * All types are currently supported for images.
2158     */
2159    uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
2160 
2161    pMemoryRequirements->size = image->size;
2162    pMemoryRequirements->alignment = image->alignment;
2163    pMemoryRequirements->memoryTypeBits = memory_types;
2164 }
2165 
2166 void anv_GetImageMemoryRequirements2KHR(
2167     VkDevice                                    _device,
2168     const VkImageMemoryRequirementsInfo2KHR*    pInfo,
2169     VkMemoryRequirements2KHR*                   pMemoryRequirements)
2170 {
2171    ANV_FROM_HANDLE(anv_device, device, _device);
2172    ANV_FROM_HANDLE(anv_image, image, pInfo->image);
2173 
2174    anv_GetImageMemoryRequirements(_device, pInfo->image,
2175                                   &pMemoryRequirements->memoryRequirements);
2176 
2177    vk_foreach_struct_const(ext, pInfo->pNext) {
2178       switch (ext->sType) {
2179       case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR: {
2180          struct anv_physical_device *pdevice = &device->instance->physicalDevice;
2181          const VkImagePlaneMemoryRequirementsInfoKHR *plane_reqs =
2182             (const VkImagePlaneMemoryRequirementsInfoKHR *) ext;
2183          uint32_t plane = anv_image_aspect_to_plane(image->aspects,
2184                                                     plane_reqs->planeAspect);
2185 
2186          assert(image->planes[plane].offset == 0);
2187 
2188          /* The Vulkan spec (git aaed022) says:
2189           *
2190           *    memoryTypeBits is a bitfield and contains one bit set for every
2191           *    supported memory type for the resource. The bit `1<<i` is set
2192           *    if and only if the memory type `i` in the
2193           *    VkPhysicalDeviceMemoryProperties structure for the physical
2194           *    device is supported.
2195           *
2196           * All types are currently supported for images.
2197           */
2198          pMemoryRequirements->memoryRequirements.memoryTypeBits =
2199                (1ull << pdevice->memory.type_count) - 1;
2200 
2201          pMemoryRequirements->memoryRequirements.size = image->planes[plane].size;
2202          pMemoryRequirements->memoryRequirements.alignment =
2203             image->planes[plane].alignment;
2204          break;
2205       }
2206 
2207       default:
2208          anv_debug_ignored_stype(ext->sType);
2209          break;
2210       }
2211    }
2212 
2213    vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2214       switch (ext->sType) {
2215       case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2216          VkMemoryDedicatedRequirementsKHR *requirements = (void *)ext;
2217          if (image->drm_format_mod != DRM_FORMAT_MOD_INVALID) {
2218             /* Require a dedicated allocation for images with modifiers.
2219              *
2220              * See also anv_AllocateMemory.
2221              */
2222             requirements->prefersDedicatedAllocation = VK_TRUE;
2223             requirements->requiresDedicatedAllocation = VK_TRUE;
2224          } else {
2225             requirements->prefersDedicatedAllocation = VK_FALSE;
2226             requirements->requiresDedicatedAllocation = VK_FALSE;
2227          }
2228          break;
2229       }
2230 
2231       default:
2232          anv_debug_ignored_stype(ext->sType);
2233          break;
2234       }
2235    }
2236 }
2237 
2238 void anv_GetImageSparseMemoryRequirements(
2239     VkDevice                                    device,
2240     VkImage                                     image,
2241     uint32_t*                                   pSparseMemoryRequirementCount,
2242     VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
2243 {
2244    *pSparseMemoryRequirementCount = 0;
2245 }
2246 
2247 void anv_GetImageSparseMemoryRequirements2KHR(
2248     VkDevice                                    device,
2249     const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
2250     uint32_t*                                   pSparseMemoryRequirementCount,
2251     VkSparseImageMemoryRequirements2KHR*        pSparseMemoryRequirements)
2252 {
2253    *pSparseMemoryRequirementCount = 0;
2254 }
2255 
2256 void anv_GetDeviceMemoryCommitment(
2257     VkDevice                                    device,
2258     VkDeviceMemory                              memory,
2259     VkDeviceSize*                               pCommittedMemoryInBytes)
2260 {
2261    *pCommittedMemoryInBytes = 0;
2262 }
2263 
2264 static void
2265 anv_bind_buffer_memory(const VkBindBufferMemoryInfoKHR *pBindInfo)
2266 {
2267    ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory);
2268    ANV_FROM_HANDLE(anv_buffer, buffer, pBindInfo->buffer);
2269 
2270    assert(pBindInfo->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR);
2271 
2272    if (mem) {
2273       assert((buffer->usage & mem->type->valid_buffer_usage) == buffer->usage);
2274       buffer->bo = mem->bo;
2275       buffer->offset = pBindInfo->memoryOffset;
2276    } else {
2277       buffer->bo = NULL;
2278       buffer->offset = 0;
2279    }
2280 }
2281 
2282 VkResult anv_BindBufferMemory(
2283     VkDevice                                    device,
2284     VkBuffer                                    buffer,
2285     VkDeviceMemory                              memory,
2286     VkDeviceSize                                memoryOffset)
2287 {
2288    anv_bind_buffer_memory(
2289       &(VkBindBufferMemoryInfoKHR) {
2290          .sType         = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
2291          .buffer        = buffer,
2292          .memory        = memory,
2293          .memoryOffset  = memoryOffset,
2294       });
2295 
2296    return VK_SUCCESS;
2297 }
2298 
2299 VkResult anv_BindBufferMemory2KHR(
2300     VkDevice                                    device,
2301     uint32_t                                    bindInfoCount,
2302     const VkBindBufferMemoryInfoKHR*            pBindInfos)
2303 {
2304    for (uint32_t i = 0; i < bindInfoCount; i++)
2305       anv_bind_buffer_memory(&pBindInfos[i]);
2306 
2307    return VK_SUCCESS;
2308 }
2309 
2310 VkResult anv_QueueBindSparse(
2311     VkQueue                                     _queue,
2312     uint32_t                                    bindInfoCount,
2313     const VkBindSparseInfo*                     pBindInfo,
2314     VkFence                                     fence)
2315 {
2316    ANV_FROM_HANDLE(anv_queue, queue, _queue);
2317    if (unlikely(queue->device->lost))
2318       return VK_ERROR_DEVICE_LOST;
2319 
2320    return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
2321 }
2322 
2323 // Event functions
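//
// An anv_event is a small allocation in the dynamic state pool: the CPU or
// GPU writes VK_EVENT_SET / VK_EVENT_RESET into it, status queries simply
// read it back, and on non-LLC platforms we clflush around each access.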
2324 
2325 VkResult anv_CreateEvent(
2326     VkDevice                                    _device,
2327     const VkEventCreateInfo*                    pCreateInfo,
2328     const VkAllocationCallbacks*                pAllocator,
2329     VkEvent*                                    pEvent)
2330 {
2331    ANV_FROM_HANDLE(anv_device, device, _device);
2332    struct anv_state state;
2333    struct anv_event *event;
2334 
2335    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
2336 
2337    state = anv_state_pool_alloc(&device->dynamic_state_pool,
2338                                 sizeof(*event), 8);
2339    event = state.map;
2340    event->state = state;
2341    event->semaphore = VK_EVENT_RESET;
2342 
2343    if (!device->info.has_llc) {
2344       /* Make sure the writes we're flushing have landed. */
2345       __builtin_ia32_mfence();
2346       __builtin_ia32_clflush(event);
2347    }
2348 
2349    *pEvent = anv_event_to_handle(event);
2350 
2351    return VK_SUCCESS;
2352 }
2353 
2354 void anv_DestroyEvent(
2355     VkDevice                                    _device,
2356     VkEvent                                     _event,
2357     const VkAllocationCallbacks*                pAllocator)
2358 {
2359    ANV_FROM_HANDLE(anv_device, device, _device);
2360    ANV_FROM_HANDLE(anv_event, event, _event);
2361 
2362    if (!event)
2363       return;
2364 
2365    anv_state_pool_free(&device->dynamic_state_pool, event->state);
2366 }
2367 
2368 VkResult anv_GetEventStatus(
2369     VkDevice                                    _device,
2370     VkEvent                                     _event)
2371 {
2372    ANV_FROM_HANDLE(anv_device, device, _device);
2373    ANV_FROM_HANDLE(anv_event, event, _event);
2374 
2375    if (unlikely(device->lost))
2376       return VK_ERROR_DEVICE_LOST;
2377 
2378    if (!device->info.has_llc) {
2379       /* Invalidate read cache before reading event written by GPU. */
2380       __builtin_ia32_clflush(event);
2381       __builtin_ia32_mfence();
2382 
2383    }
2384 
2385    return event->semaphore;
2386 }
2387 
2388 VkResult anv_SetEvent(
2389     VkDevice                                    _device,
2390     VkEvent                                     _event)
2391 {
2392    ANV_FROM_HANDLE(anv_device, device, _device);
2393    ANV_FROM_HANDLE(anv_event, event, _event);
2394 
2395    event->semaphore = VK_EVENT_SET;
2396 
2397    if (!device->info.has_llc) {
2398       /* Make sure the writes we're flushing have landed. */
2399       __builtin_ia32_mfence();
2400       __builtin_ia32_clflush(event);
2401    }
2402 
2403    return VK_SUCCESS;
2404 }
2405 
2406 VkResult anv_ResetEvent(
2407     VkDevice                                    _device,
2408     VkEvent                                     _event)
2409 {
2410    ANV_FROM_HANDLE(anv_device, device, _device);
2411    ANV_FROM_HANDLE(anv_event, event, _event);
2412 
2413    event->semaphore = VK_EVENT_RESET;
2414 
2415    if (!device->info.has_llc) {
2416       /* Make sure the writes we're flushing have landed. */
2417       __builtin_ia32_mfence();
2418       __builtin_ia32_clflush(event);
2419    }
2420 
2421    return VK_SUCCESS;
2422 }
2423 
2424 // Buffer functions
2425 
2426 VkResult anv_CreateBuffer(
2427     VkDevice                                    _device,
2428     const VkBufferCreateInfo*                   pCreateInfo,
2429     const VkAllocationCallbacks*                pAllocator,
2430     VkBuffer*                                   pBuffer)
2431 {
2432    ANV_FROM_HANDLE(anv_device, device, _device);
2433    struct anv_buffer *buffer;
2434 
2435    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2436 
2437    buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2438                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2439    if (buffer == NULL)
2440       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2441 
2442    buffer->size = pCreateInfo->size;
2443    buffer->usage = pCreateInfo->usage;
2444    buffer->bo = NULL;
2445    buffer->offset = 0;
2446 
2447    *pBuffer = anv_buffer_to_handle(buffer);
2448 
2449    return VK_SUCCESS;
2450 }
2451 
2452 void anv_DestroyBuffer(
2453     VkDevice                                    _device,
2454     VkBuffer                                    _buffer,
2455     const VkAllocationCallbacks*                pAllocator)
2456 {
2457    ANV_FROM_HANDLE(anv_device, device, _device);
2458    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
2459 
2460    if (!buffer)
2461       return;
2462 
2463    vk_free2(&device->alloc, pAllocator, buffer);
2464 }
2465 
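/* Fill a buffer surface state through ISL with the given format, offset,
 * range and stride, then flush the state so the GPU sees it.
 */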
2466 void
2467 anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
2468                               enum isl_format format,
2469                               uint32_t offset, uint32_t range, uint32_t stride)
2470 {
2471    isl_buffer_fill_state(&device->isl_dev, state.map,
2472                          .address = offset,
2473                          .mocs = device->default_mocs,
2474                          .size = range,
2475                          .format = format,
2476                          .stride = stride);
2477 
2478    anv_state_flush(device, state);
2479 }
2480 
2481 void anv_DestroySampler(
2482     VkDevice                                    _device,
2483     VkSampler                                   _sampler,
2484     const VkAllocationCallbacks*                pAllocator)
2485 {
2486    ANV_FROM_HANDLE(anv_device, device, _device);
2487    ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
2488 
2489    if (!sampler)
2490       return;
2491 
2492    vk_free2(&device->alloc, pAllocator, sampler);
2493 }
2494 
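/* The framebuffer and its attachment array are allocated as one block; the
 * image-view pointers live in attachments[] immediately after the struct.
 */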
2495 VkResult anv_CreateFramebuffer(
2496     VkDevice                                    _device,
2497     const VkFramebufferCreateInfo*              pCreateInfo,
2498     const VkAllocationCallbacks*                pAllocator,
2499     VkFramebuffer*                              pFramebuffer)
2500 {
2501    ANV_FROM_HANDLE(anv_device, device, _device);
2502    struct anv_framebuffer *framebuffer;
2503 
2504    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2505 
2506    size_t size = sizeof(*framebuffer) +
2507                  sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
2508    framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2509                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2510    if (framebuffer == NULL)
2511       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2512 
2513    framebuffer->attachment_count = pCreateInfo->attachmentCount;
2514    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2515       VkImageView _iview = pCreateInfo->pAttachments[i];
2516       framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
2517    }
2518 
2519    framebuffer->width = pCreateInfo->width;
2520    framebuffer->height = pCreateInfo->height;
2521    framebuffer->layers = pCreateInfo->layers;
2522 
2523    *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
2524 
2525    return VK_SUCCESS;
2526 }
2527 
2528 void anv_DestroyFramebuffer(
2529     VkDevice                                    _device,
2530     VkFramebuffer                               _fb,
2531     const VkAllocationCallbacks*                pAllocator)
2532 {
2533    ANV_FROM_HANDLE(anv_device, device, _device);
2534    ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
2535 
2536    if (!fb)
2537       return;
2538 
2539    vk_free2(&device->alloc, pAllocator, fb);
2540 }
2541 
2542 /* vk_icd.h does not declare this function, so we declare it here to
2543  * suppress Wmissing-prototypes.
2544  */
2545 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2546 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion);
2547 
2548 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2549 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion)
2550 {
2551    /* For the full details on loader interface versioning, see
2552     * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2553     * What follows is a condensed summary, to help you navigate the large and
2554     * confusing official doc.
2555     *
2556     *   - Loader interface v0 is incompatible with later versions. We don't
2557     *     support it.
2558     *
2559     *   - In loader interface v1:
2560     *       - The first ICD entrypoint called by the loader is
2561     *         vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2562     *         entrypoint.
2563     *       - The ICD must statically expose no other Vulkan symbol unless it is
2564     *         linked with -Bsymbolic.
2565     *       - Each dispatchable Vulkan handle created by the ICD must be
2566     *         a pointer to a struct whose first member is VK_LOADER_DATA. The
2567     *         ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
2568     *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2569     *         vkDestroySurfaceKHR(). The ICD must be capable of working with
2570     *         such loader-managed surfaces.
2571     *
2572     *    - Loader interface v2 differs from v1 in:
2573     *       - The first ICD entrypoint called by the loader is
2574     *         vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2575     *         statically expose this entrypoint.
2576     *
2577     *    - Loader interface v3 differs from v2 in:
2578     *        - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2579     *          vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
2580     *          because the loader no longer does so.
2581     */
2582    *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2583    return VK_SUCCESS;
2584 }
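/* Hypothetical illustration (not driver code) of how a loader might use the
 * negotiation above: it passes in the highest version it supports and keeps
 * whatever the ICD clamps it to.
 *
 *    uint32_t version = 5;
 *    vk_icdNegotiateLoaderICDInterfaceVersion(&version);
 *    // with this ICD, version is now MIN2(5, 3) == 3
 */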
2585