/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>

#include <llvm/Config/llvm-config.h>

#include "util/macros.h"
#include "util/list.h"
#include "util/u_dynarray.h"
#include "util/simple_mtx.h"
#include "util/u_queue.h"
#include "util/u_upload_mgr.h"

#include "compiler/shader_enums.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "cso_cache/cso_context.h"
#include "nir.h"

/* Forward declarations needed for the WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

#define VK_PROTOTYPES
#include <vulkan/vulkan.h>
#include <vulkan/vk_icd.h>

#include "lvp_entrypoints.h"
#include "vk_buffer.h"
#include "vk_buffer_view.h"
#include "vk_device.h"
#include "vk_instance.h"
#include "vk_image.h"
#include "vk_log.h"
#include "vk_physical_device.h"
#include "vk_shader_module.h"
#include "vk_util.h"
#include "vk_format.h"
#include "vk_cmd_queue.h"
#include "vk_command_buffer.h"
#include "vk_command_pool.h"
#include "vk_descriptor_set_layout.h"
#include "vk_graphics_state.h"
#include "vk_pipeline_layout.h"
#include "vk_queue.h"
#include "vk_sampler.h"
#include "vk_sync.h"
#include "vk_sync_timeline.h"
#include "vk_ycbcr_conversion.h"
#include "lp_jit.h"

#include "wsi_common.h"

#ifdef __cplusplus
extern "C" {
#endif

#define MAX_SETS         8
#define MAX_DESCRIPTORS 1000000 /* Required by vkd3d-proton */
#define MAX_PUSH_CONSTANTS_SIZE 256
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DESCRIPTOR_UNIFORM_BLOCK_SIZE 4096
#define MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS 8
#define MAX_DGC_STREAMS 16
#define MAX_DGC_TOKENS 16

#ifdef _WIN32
#define lvp_printflike(a, b)
#else
#define lvp_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
#endif
#define LVP_DEBUG_ALL_ENTRYPOINTS (1 << 0)

void __lvp_finishme(const char *file, int line, const char *format, ...)
   lvp_printflike(3, 4);

#define lvp_finishme(format, ...) \
   __lvp_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);

#define stub_return(v) \
   do { \
      lvp_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

#define stub() \
   do { \
      lvp_finishme("stub %s", __func__); \
      return; \
   } while (0)

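/*
 * A minimal usage sketch (the entrypoint below is hypothetical, not part of
 * this header): stub_return()/stub() report the call site through
 * __lvp_finishme() and then return, so unimplemented entrypoints still
 * compile and fail loudly at runtime.
 *
 * VkResult lvp_SomeUnimplementedEntrypoint(VkDevice _device)
 * {
 *    stub_return(VK_ERROR_FEATURE_NOT_PRESENT);
 * }
 */
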
#define LVP_SHADER_STAGES (MESA_SHADER_MESH + 1)
#define LVP_STAGE_MASK BITFIELD_MASK(LVP_SHADER_STAGES)
#define LVP_STAGE_MASK_GFX (BITFIELD_MASK(LVP_SHADER_STAGES) & ~BITFIELD_BIT(MESA_SHADER_COMPUTE))

#define lvp_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & LVP_STAGE_MASK);    \
        stage = ffs(__tmp) - 1, __tmp;                               \
        __tmp &= ~(1 << (stage)))

#define lvp_forall_stage(stage)                                      \
   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < LVP_SHADER_STAGES; stage++)

#define lvp_forall_gfx_stage(stage)                                  \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)(LVP_STAGE_MASK_GFX);               \
        stage = ffs(__tmp) - 1, __tmp;                               \
        __tmp &= ~(1 << (stage)))

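/*
 * Sketch of the iteration idiom (the helper called in the body is
 * hypothetical): lvp_foreach_stage() visits each stage whose bit is set in
 * the mask, lowest stage first, while lvp_forall_stage() walks every stage.
 *
 * static void setup_stages(uint32_t stage_bits)
 * {
 *    lvp_foreach_stage(stage, stage_bits) {
 *       per_stage_setup(stage);
 *    }
 * }
 */
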
struct lvp_physical_device {
   struct vk_physical_device vk;

   struct pipe_loader_device *pld;
   struct pipe_screen *pscreen;
   const nir_shader_compiler_options *drv_options[LVP_SHADER_STAGES];
   uint32_t max_images;

   struct vk_sync_timeline_type sync_timeline_type;
   const struct vk_sync_type *sync_types[3];

   struct wsi_device                       wsi_device;
};

struct lvp_instance {
   struct vk_instance vk;

   uint32_t apiVersion;

   uint64_t debug_flags;

   struct pipe_loader_device *devs;
   int num_devices;
};

VkResult lvp_init_wsi(struct lvp_physical_device *physical_device);
void lvp_finish_wsi(struct lvp_physical_device *physical_device);

bool lvp_physical_device_extension_supported(struct lvp_physical_device *dev,
                                             const char *name);

struct lvp_queue {
   struct vk_queue vk;
   struct lvp_device *device;
   struct pipe_context *ctx;
   struct cso_context *cso;
   struct u_upload_mgr *uploader;
   struct pipe_fence_handle *last_fence;
   void *state;
   struct util_dynarray pipeline_destroys;
   simple_mtx_t lock;
};

struct lvp_pipeline_cache {
   struct vk_object_base                        base;
   struct lvp_device *                          device;
   VkAllocationCallbacks                        alloc;
};

struct lvp_device {
   struct vk_device vk;

   struct lvp_queue queue;
   struct lvp_instance *instance;
   struct lvp_physical_device *physical_device;
   struct pipe_screen *pscreen;
   void *noop_fs;
   simple_mtx_t bda_lock;
   struct hash_table bda;
   struct pipe_resource *zero_buffer; /* for zeroed bda */
   bool poison_mem;
   bool print_cmds;

   struct lp_texture_handle *null_texture_handle;
   struct lp_texture_handle *null_image_handle;
   struct util_dynarray bda_texture_handles;
   struct util_dynarray bda_image_handles;
};

void lvp_device_get_cache_uuid(void *uuid);

enum lvp_device_memory_type {
   LVP_DEVICE_MEMORY_TYPE_DEFAULT,
   LVP_DEVICE_MEMORY_TYPE_USER_PTR,
   LVP_DEVICE_MEMORY_TYPE_OPAQUE_FD,
};

struct lvp_device_memory {
   struct vk_object_base base;
   struct pipe_memory_allocation *pmem;
   uint32_t                                     type_index;
   VkDeviceSize                                 map_size;
   VkDeviceSize                                 size;
   void *                                       map;
   enum lvp_device_memory_type memory_type;
   int                                          backed_fd;
};

struct lvp_pipe_sync {
   struct vk_sync base;

   mtx_t lock;
   cnd_t changed;

   bool signaled;
   struct pipe_fence_handle *fence;
};

extern const struct vk_sync_type lvp_pipe_sync_type;

void lvp_pipe_sync_signal_with_fence(struct lvp_device *device,
                                     struct lvp_pipe_sync *sync,
                                     struct pipe_fence_handle *fence);

static inline struct lvp_pipe_sync *
vk_sync_as_lvp_pipe_sync(struct vk_sync *sync)
{
   assert(sync->type == &lvp_pipe_sync_type);
   return container_of(sync, struct lvp_pipe_sync, base);
}

struct lvp_image_plane {
   struct pipe_resource *bo;
   struct pipe_memory_allocation *pmem;
   VkDeviceSize plane_offset;
   VkDeviceSize memory_offset;
   VkDeviceSize size;
};

struct lvp_image {
   struct vk_image vk;
   VkDeviceSize size;
   uint32_t alignment;
   bool disjoint;
   uint8_t plane_count;
   struct lvp_image_plane planes[3];
};

struct lvp_image_view {
   struct vk_image_view vk;
   const struct lvp_image *image; /**< VkImageViewCreateInfo::image */

   enum pipe_format pformat;

   struct pipe_surface *surface; /* have we created a pipe surface for this? */
   struct lvp_image_view *multisample; /* VK_EXT_multisampled_render_to_single_sampled */

   uint8_t plane_count;
   struct {
      unsigned image_plane;
      struct pipe_sampler_view *sv;
      struct pipe_image_view iv;
      struct lp_texture_handle *texture_handle;
      struct lp_texture_handle *image_handle;
   } planes[3];
};

struct lvp_sampler {
   struct vk_sampler vk;
   struct lp_descriptor desc;

   struct lp_texture_handle *texture_handle;
};

struct lvp_descriptor_set_binding_layout {
   uint32_t descriptor_index;
   VkDescriptorType type;
   uint32_t stride; /* used for planar samplers */
   /* Number of array elements in this binding */
   uint32_t array_size;
   bool valid;

   uint32_t dynamic_index;

   uint32_t uniform_block_offset;
   uint32_t uniform_block_size;

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct lvp_sampler **immutable_samplers;
};

struct lvp_descriptor_set_layout {
   struct vk_descriptor_set_layout vk;

   /* add new members after this */

   uint32_t immutable_sampler_count;

   /* Number of bindings in this descriptor set */
   uint32_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint32_t size;

   /* Shader stages affected by this descriptor set */
   uint32_t shader_stages;

   /* Number of dynamic offsets used by this descriptor set */
   uint32_t dynamic_offset_count;

   /* If this layout is comprised solely of immutable samplers, this will be a bindable set */
   struct lvp_descriptor_set *immutable_set;

   /* Bindings in this descriptor set */
   struct lvp_descriptor_set_binding_layout binding[0];
};

static inline const struct lvp_descriptor_set_layout *
vk_to_lvp_descriptor_set_layout(const struct vk_descriptor_set_layout *layout)
{
   return container_of(layout, const struct lvp_descriptor_set_layout, vk);
}

struct lvp_descriptor_set {
   struct vk_object_base base;
   struct lvp_descriptor_set_layout *layout;
   struct list_head link;

   /* Buffer holding the descriptors. */
   struct pipe_memory_allocation *pmem;
   struct pipe_resource *bo;
   void *map;
};

struct lvp_descriptor_pool {
   struct vk_object_base base;
   VkDescriptorPoolCreateFlags flags;
   uint32_t max_sets;

   struct list_head sets;
};

struct lvp_descriptor_update_template {
   struct vk_object_base base;
   unsigned ref_cnt;
   uint32_t entry_count;
   uint32_t set;
   VkDescriptorUpdateTemplateType type;
   VkPipelineBindPoint bind_point;
   struct lvp_pipeline_layout *pipeline_layout;
   VkDescriptorUpdateTemplateEntry entry[0];
};

uint32_t lvp_descriptor_update_template_entry_size(VkDescriptorType type);

static inline void
lvp_descriptor_template_templ_ref(struct lvp_descriptor_update_template *templ)
{
   assert(templ && templ->ref_cnt >= 1);
   p_atomic_inc(&templ->ref_cnt);
}

void
lvp_descriptor_template_destroy(struct lvp_device *device, struct lvp_descriptor_update_template *templ);

static inline void
lvp_descriptor_template_templ_unref(struct lvp_device *device,
                                    struct lvp_descriptor_update_template *templ)
{
   if (!templ)
      return;
   assert(templ->ref_cnt >= 1);
   if (p_atomic_dec_zero(&templ->ref_cnt))
      lvp_descriptor_template_destroy(device, templ);
}
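
/*
 * Sketch of the intended reference-counting pattern (the surrounding code
 * is hypothetical): a template starts with ref_cnt == 1, each additional
 * holder takes a reference, and the last unref destroys it.
 *
 *    lvp_descriptor_template_templ_ref(templ);           // share ownership
 *    ...
 *    lvp_descriptor_template_templ_unref(device, templ); // drop; frees at 0
 */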

VkResult
lvp_descriptor_set_create(struct lvp_device *device,
                          struct lvp_descriptor_set_layout *layout,
                          struct lvp_descriptor_set **out_set);

void
lvp_descriptor_set_destroy(struct lvp_device *device,
                           struct lvp_descriptor_set *set);

void
lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descriptorSet,
                                        VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                        const void *pData, bool push);

struct lvp_pipeline_layout {
   struct vk_pipeline_layout vk;

   uint32_t push_constant_size;
   VkShaderStageFlags push_constant_stages;
};

struct lvp_pipeline_layout *
lvp_pipeline_layout_create(struct lvp_device *device,
                           const VkPipelineLayoutCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *pAllocator);

struct lvp_pipeline_nir {
   int ref_cnt;
   nir_shader *nir;
};

static inline void
lvp_pipeline_nir_ref(struct lvp_pipeline_nir **dst, struct lvp_pipeline_nir *src)
{
   struct lvp_pipeline_nir *old_dst = *dst;
   if (old_dst == src || (old_dst && src && old_dst->nir == src->nir))
      return;

   if (old_dst && p_atomic_dec_zero(&old_dst->ref_cnt)) {
      ralloc_free(old_dst->nir);
      ralloc_free(old_dst);
   }
   if (src)
      p_atomic_inc(&src->ref_cnt);
   *dst = src;
}
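
/*
 * lvp_pipeline_nir_ref() is the usual "assign with refcounting" helper: it
 * drops the reference held in *dst (freeing the NIR when the count reaches
 * zero), takes a reference on src, and stores it. A minimal sketch, with
 * hypothetical local variables:
 *
 *    struct lvp_pipeline_nir *cur = NULL;
 *    lvp_pipeline_nir_ref(&cur, a);    // cur now holds a reference on a
 *    lvp_pipeline_nir_ref(&cur, b);    // releases a, now references b
 *    lvp_pipeline_nir_ref(&cur, NULL); // releases b
 */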

struct lvp_inline_variant {
   uint32_t mask;
   uint32_t vals[PIPE_MAX_CONSTANT_BUFFERS][MAX_INLINABLE_UNIFORMS];
   void *cso;
};

struct lvp_shader {
   struct vk_object_base base;
   struct lvp_pipeline_layout *layout;
   struct lvp_pipeline_nir *pipeline_nir;
   struct lvp_pipeline_nir *tess_ccw;
   void *shader_cso;
   void *tess_ccw_cso;
   struct {
      uint32_t uniform_offsets[PIPE_MAX_CONSTANT_BUFFERS][MAX_INLINABLE_UNIFORMS];
      uint8_t count[PIPE_MAX_CONSTANT_BUFFERS];
      bool must_inline;
      uint32_t can_inline; /* bitmask */
      struct set variants;
   } inlines;
   struct pipe_stream_output_info stream_output;
   struct blob blob; /* preserved for GetShaderBinaryDataEXT */
};

enum lvp_pipeline_type {
   LVP_PIPELINE_GRAPHICS,
   LVP_PIPELINE_COMPUTE,
   LVP_PIPELINE_EXEC_GRAPH,
   LVP_PIPELINE_TYPE_COUNT,
};


static inline enum lvp_pipeline_type
lvp_pipeline_type_from_bind_point(VkPipelineBindPoint bind_point)
{
   switch (bind_point) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS: return LVP_PIPELINE_GRAPHICS;
   case VK_PIPELINE_BIND_POINT_COMPUTE: return LVP_PIPELINE_COMPUTE;
#ifdef VK_ENABLE_BETA_EXTENSIONS
   case VK_PIPELINE_BIND_POINT_EXECUTION_GRAPH_AMDX: return LVP_PIPELINE_EXEC_GRAPH;
#endif
   default: unreachable("Unsupported VkPipelineBindPoint");
   }
}

static inline uint32_t
lvp_pipeline_types_from_shader_stages(VkShaderStageFlags stageFlags)
{
   uint32_t types = 0;
#ifdef VK_ENABLE_BETA_EXTENSIONS
   if (stageFlags & MESA_VK_SHADER_STAGE_WORKGRAPH_HACK_BIT_FIXME)
      types |= BITFIELD_BIT(LVP_PIPELINE_EXEC_GRAPH);
#endif
   if (stageFlags & VK_SHADER_STAGE_COMPUTE_BIT)
      types |= BITFIELD_BIT(LVP_PIPELINE_COMPUTE);
   if (stageFlags & (VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_MESH_BIT_EXT | VK_SHADER_STAGE_TASK_BIT_EXT))
      types |= BITFIELD_BIT(LVP_PIPELINE_GRAPHICS);
   return types;
}
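
/*
 * Sketch: fanning out over the pipeline types selected by a stage mask
 * (u_foreach_bit comes from util/bitscan.h; the handler is hypothetical):
 *
 *    uint32_t types = lvp_pipeline_types_from_shader_stages(stageFlags);
 *    u_foreach_bit(t, types)
 *       handle_pipeline_type((enum lvp_pipeline_type)t);
 */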

struct lvp_pipeline {
   struct vk_object_base base;
   struct lvp_device *device;
   struct lvp_pipeline_layout *layout;

   enum lvp_pipeline_type type;

   void *state_data;
   bool force_min_sample;
   struct lvp_shader shaders[LVP_SHADER_STAGES];
   gl_shader_stage last_vertex;
   struct vk_graphics_pipeline_state graphics_state;
   VkGraphicsPipelineLibraryFlagsEXT stages;
   bool line_smooth;
   bool disable_multisample;
   bool line_rectangular;
   bool library;
   bool compiled;
   bool used;

   struct {
      const char *name;
      const char *next_name;
      uint32_t index;
      uint32_t scratch_size;
   } exec_graph;

   unsigned num_groups;
   unsigned num_groups_total;
   VkPipeline groups[0];
};

/* Minimum requirement by the spec. */
#define LVP_MAX_EXEC_GRAPH_PAYLOADS 256

struct lvp_exec_graph_shader_output {
   uint32_t payload_count;
   uint32_t node_index;
};

struct lvp_exec_graph_internal_data {
   /* inputs */
   void *payload_in;
   void *payloads;
   /* outputs */
   struct lvp_exec_graph_shader_output outputs[LVP_MAX_EXEC_GRAPH_PAYLOADS];
};

bool
lvp_lower_exec_graph(struct lvp_pipeline *pipeline, nir_shader *nir);

void
lvp_pipeline_shaders_compile(struct lvp_pipeline *pipeline, bool locked);

struct lvp_event {
   struct vk_object_base base;
   volatile uint64_t event_storage;
};

struct lvp_buffer {
   struct vk_buffer vk;

   struct pipe_memory_allocation *pmem;
   struct pipe_resource *bo;
   uint64_t total_size;
   uint64_t offset;
};

struct lvp_buffer_view {
   struct vk_buffer_view vk;
   enum pipe_format pformat;
   struct pipe_sampler_view *sv;
   struct pipe_image_view iv;

   struct lp_texture_handle *texture_handle;
   struct lp_texture_handle *image_handle;
};

struct lvp_query_pool {
   struct vk_object_base base;
   VkQueryType type;
   uint32_t count;
   VkQueryPipelineStatisticFlags pipeline_stats;
   enum pipe_query_type base_type;
   struct pipe_query *queries[0];
};

struct lvp_cmd_buffer {
   struct vk_command_buffer vk;

   struct lvp_device *device;

   uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
};

struct lvp_indirect_command_layout {
   struct vk_object_base base;
   uint8_t stream_count;
   uint8_t token_count;
   uint16_t stream_strides[MAX_DGC_STREAMS];
   VkPipelineBindPoint bind_point;
   VkIndirectCommandsLayoutUsageFlagsNV flags;
   VkIndirectCommandsLayoutTokenNV tokens[0];
};

extern const struct vk_command_buffer_ops lvp_cmd_buffer_ops;

static inline const struct lvp_descriptor_set_layout *
get_set_layout(const struct lvp_pipeline_layout *layout, uint32_t set)
{
   return container_of(layout->vk.set_layouts[set],
                       const struct lvp_descriptor_set_layout, vk);
}

static inline const struct lvp_descriptor_set_binding_layout *
get_binding_layout(const struct lvp_pipeline_layout *layout,
                   uint32_t set, uint32_t binding)
{
   return &get_set_layout(layout, set)->binding[binding];
}

#define LVP_FROM_HANDLE(__lvp_type, __name, __handle) \
   struct __lvp_type *__name = __lvp_type ## _from_handle(__handle)

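/*
 * A minimal usage sketch inside an entrypoint (the entrypoint itself is
 * illustrative, not declared here): the macro expands to a declaration of
 * the driver-private struct recovered from the Vulkan handle.
 *
 * VKAPI_ATTR void VKAPI_CALL lvp_SomeEntrypoint(VkDevice _device, VkBuffer _buffer)
 * {
 *    LVP_FROM_HANDLE(lvp_device, device, _device);
 *    LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
 *    ...
 * }
 */
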
VK_DEFINE_HANDLE_CASTS(lvp_cmd_buffer, vk.base, VkCommandBuffer,
                       VK_OBJECT_TYPE_COMMAND_BUFFER)
VK_DEFINE_HANDLE_CASTS(lvp_device, vk.base, VkDevice, VK_OBJECT_TYPE_DEVICE)
VK_DEFINE_HANDLE_CASTS(lvp_instance, vk.base, VkInstance, VK_OBJECT_TYPE_INSTANCE)
VK_DEFINE_HANDLE_CASTS(lvp_physical_device, vk.base, VkPhysicalDevice,
                       VK_OBJECT_TYPE_PHYSICAL_DEVICE)
VK_DEFINE_HANDLE_CASTS(lvp_queue, vk.base, VkQueue, VK_OBJECT_TYPE_QUEUE)

VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_buffer, vk.base, VkBuffer,
                               VK_OBJECT_TYPE_BUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_buffer_view, vk.base, VkBufferView,
                               VK_OBJECT_TYPE_BUFFER_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_pool, base, VkDescriptorPool,
                               VK_OBJECT_TYPE_DESCRIPTOR_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_set, base, VkDescriptorSet,
                               VK_OBJECT_TYPE_DESCRIPTOR_SET)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_set_layout, vk.base, VkDescriptorSetLayout,
                               VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_update_template, base, VkDescriptorUpdateTemplate,
                               VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_device_memory, base, VkDeviceMemory,
                               VK_OBJECT_TYPE_DEVICE_MEMORY)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_event, base, VkEvent, VK_OBJECT_TYPE_EVENT)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_image, vk.base, VkImage, VK_OBJECT_TYPE_IMAGE)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_image_view, vk.base, VkImageView,
                               VK_OBJECT_TYPE_IMAGE_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_cache, base, VkPipelineCache,
                               VK_OBJECT_TYPE_PIPELINE_CACHE)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline, base, VkPipeline,
                               VK_OBJECT_TYPE_PIPELINE)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_shader, base, VkShaderEXT,
                               VK_OBJECT_TYPE_SHADER_EXT)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_layout, vk.base, VkPipelineLayout,
                               VK_OBJECT_TYPE_PIPELINE_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_query_pool, base, VkQueryPool,
                               VK_OBJECT_TYPE_QUERY_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_sampler, vk.base, VkSampler,
                               VK_OBJECT_TYPE_SAMPLER)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_indirect_command_layout, base, VkIndirectCommandsLayoutNV,
                               VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV)


void lvp_add_enqueue_cmd_entrypoints(struct vk_device_dispatch_table *disp);

VkResult lvp_execute_cmds(struct lvp_device *device,
                          struct lvp_queue *queue,
                          struct lvp_cmd_buffer *cmd_buffer);

size_t lvp_get_rendering_state_size(void);

struct lvp_image *lvp_swapchain_get_image(VkSwapchainKHR swapchain,
                                          uint32_t index);

static inline enum pipe_format
lvp_vk_format_to_pipe_format(VkFormat format)
{
   /* Some formats cause problems with the CTS right now. */
   if (format == VK_FORMAT_R4G4B4A4_UNORM_PACK16 ||
       format == VK_FORMAT_R8_SRGB ||
       format == VK_FORMAT_R8G8_SRGB ||
       format == VK_FORMAT_R64G64B64A64_SFLOAT ||
       format == VK_FORMAT_R64_SFLOAT ||
       format == VK_FORMAT_R64G64_SFLOAT ||
       format == VK_FORMAT_R64G64B64_SFLOAT ||
       format == VK_FORMAT_A2R10G10B10_SINT_PACK32 ||
       format == VK_FORMAT_A2B10G10R10_SINT_PACK32 ||
       format == VK_FORMAT_D16_UNORM_S8_UINT)
      return PIPE_FORMAT_NONE;

   return vk_format_to_pipe_format(format);
}

static inline uint8_t
lvp_image_aspects_to_plane(ASSERTED const struct lvp_image *image,
                           VkImageAspectFlags aspectMask)
{
   /* Verify that the aspects are actually in the image */
   assert(!(aspectMask & ~image->vk.aspects));

   /* Must only be one aspect unless it's depth/stencil */
   assert(aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
                         VK_IMAGE_ASPECT_STENCIL_BIT) ||
          util_bitcount(aspectMask) == 1);

   switch (aspectMask) {
   case VK_IMAGE_ASPECT_PLANE_1_BIT: return 1;
   case VK_IMAGE_ASPECT_PLANE_2_BIT: return 2;
   default: return 0;
   }
}
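
/*
 * Illustrative only: resolving an aspect to a plane index when operating on
 * one plane of a multi-planar image (the variables are hypothetical):
 *
 *    uint8_t plane = lvp_image_aspects_to_plane(image, VK_IMAGE_ASPECT_PLANE_1_BIT);
 *    struct lvp_image_plane *p = &image->planes[plane]; // plane == 1
 */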

void
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline, bool locked);

void
queue_thread_noop(void *data, void *gdata, int thread_index);

void
lvp_shader_optimize(nir_shader *nir);

bool
lvp_find_inlinable_uniforms(struct lvp_shader *shader, nir_shader *nir);

void
lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint32_t *uniform_values, uint32_t ubo);

void *
lvp_shader_compile(struct lvp_device *device, struct lvp_shader *shader, nir_shader *nir, bool locked);

enum vk_cmd_type
lvp_nv_dgc_token_to_cmd_type(const VkIndirectCommandsLayoutTokenNV *token);

#ifdef __cplusplus
}
#endif