/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ANV_PRIVATE_H
#define ANV_PRIVATE_H

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <stdint.h>
#include <i915_drm.h>

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#else
#define VG(x)
#endif

#include "common/gen_clflush.h"
#include "common/gen_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
#include "util/u_atomic.h"
#include "util/u_vector.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"

/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct anv_buffer;
struct anv_buffer_view;
struct anv_image_view;
struct anv_instance;

struct gen_l3_config;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>
#include <vulkan/vk_android_native_buffer.h>

#include "anv_entrypoints.h"
#include "anv_extensions.h"
#include "isl/isl.h"

#include "common/gen_debug.h"
#include "common/intel_log.h"
#include "wsi_common.h"

/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so through the use of a seemingly uninitialized clear
 * value.
 */
#define ANV_HZ_FC_VAL 1.0f

#define MAX_VBS         28
#define MAX_SETS         8
#define MAX_RTS          8
#define MAX_VIEWPORTS   16
#define MAX_SCISSORS    16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 8
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */

#define ANV_SVGS_VB_INDEX    MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)

#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}

static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}

static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}

#define for_each_bit(b, dword)                          \
   for (uint32_t __dword = (dword);                     \
        (b) = __builtin_ffs(__dword) - 1, __dword;      \
        __dword &= ~(1 << (b)))

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

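/* Illustrative sketch (not part of the original header): for_each_bit walks
 * the set bits of a 32-bit mask from lowest to highest, and typed_memcpy is
 * a memcpy that statically checks that source and destination elements have
 * the same size.  The variable names below are hypothetical.
 *
 *    uint32_t b, mask = 0x15;
 *    for_each_bit(b, mask)
 *       printf("bit %u is set\n", b);     // prints 0, 2, 4
 *
 *    VkViewport dst[4], src[4];
 *    typed_memcpy(dst, src, 4);           // copies 4 viewports
 */
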
/* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
 * to be added here in order to utilize mapping in debug/error/perf macros.
 */
#define REPORT_OBJECT_TYPE(o)                                                      \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_instance*),              \
   VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,                                       \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*),       \
   VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,                                \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_device*),                \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,                                         \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), const struct anv_device*),          \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,                                         \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_queue*),                 \
   VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*),             \
   VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,                                      \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*),            \
   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,                                 \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_fence*),                 \
   VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*),         \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,                                  \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_buffer*),                \
   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,                                         \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_image*),                 \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), const struct anv_image*),           \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_event*),                 \
   VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*),            \
   VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,                                     \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*),           \
   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,                                    \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_image_view*),            \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,                                     \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*),         \
   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,                                  \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*),        \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,                                 \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*),       \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,                                \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*),           \
   VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,                                    \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*),              \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,                                       \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_sampler*),               \
   VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,                                        \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*),       \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,                                \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*),        \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,                                 \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*),           \
   VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,                                    \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*),              \
   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,                                   \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_surface*),               \
   VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT,                                    \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*),             \
   VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,                                  \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct vk_debug_callback*),         \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT,                      \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), void*),                             \
   VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,                                        \
   /* The void expression results in a compile-time error                          \
      when assigning the result to something.  */                                  \
   (void)0)))))))))))))))))))))))))))))))

/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

VkResult __vk_errorf(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, VkResult error,
                     const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(NULL, NULL,\
                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
                                    error, __FILE__, __LINE__, NULL)
#define vk_errorf(instance, obj, error, format, ...)\
    __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
                __FILE__, __LINE__, format, ## __VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorf(instance, obj, error, format, ...) error
#endif

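/* Illustrative sketch (not part of the original header): a typical error
 * site returns through vk_error()/vk_errorf() so debug builds log the file
 * and line.  The surrounding allocation is just an example.
 *
 *    mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
 *                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 *    if (mem == NULL)
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 */
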
/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain.  In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   intel_logd("%s: ignored VkStructureType %u\n", __func__, (sType))

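/* Illustrative sketch (not part of the original header): pNext chains are
 * usually walked with a switch on sType, reporting unknown structs via
 * anv_debug_ignored_stype() in the default case.  vk_foreach_struct comes
 * from Mesa's common Vulkan utility code and the case label below is a
 * made-up placeholder.
 *
 *    vk_foreach_struct(ext, pCreateInfo->pNext) {
 *       switch (ext->sType) {
 *       case VK_STRUCTURE_TYPE_SOME_SUPPORTED_EXTENSION:
 *          handle_it((const void *)ext);
 *          break;
 *       default:
 *          anv_debug_ignored_stype(ext->sType);
 *          break;
 *       }
 *    }
 */
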
void __anv_perf_warn(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, const char *file,
                     int line, const char *format, ...)
   anv_printflike(6, 7);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);

/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported) { \
         intel_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, \
                    ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/**
 * Print a perf warning message.  Set INTEL_DEBUG=perf to see these.
 */
#define anv_perf_warn(instance, obj, format, ...) \
   do { \
      static bool reported = false; \
      if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
         __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
                         format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/* A non-fatal assert.  Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      intel_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif

/* A multi-pointer allocator
 *
 * When copying data structures from the user (such as a render pass), it's
 * common to need to allocate data for a bunch of different things.  Instead
 * of doing several allocations and having to handle all of the error checking
 * that entails, it can be easier to do a single allocation.  This struct
 * helps facilitate that.  The intended usage looks like this:
 *
 *    ANV_MULTIALLOC(ma);
 *    anv_multialloc_add(&ma, &main_ptr, 1);
 *    anv_multialloc_add(&ma, &substruct1, substruct1Count);
 *    anv_multialloc_add(&ma, &substruct2, substruct2Count);
 *
 *    if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 */
struct anv_multialloc {
    size_t size;
    size_t align;

    uint32_t ptr_count;
    void **ptrs[8];
};

#define ANV_MULTIALLOC_INIT \
   ((struct anv_multialloc) { 0, })

#define ANV_MULTIALLOC(_name) \
   struct anv_multialloc _name = ANV_MULTIALLOC_INIT

__attribute__((always_inline))
static inline void
_anv_multialloc_add(struct anv_multialloc *ma,
                    void **ptr, size_t size, size_t align)
{
   size_t offset = align_u64(ma->size, align);
   ma->size = offset + size;
   ma->align = MAX2(ma->align, align);

   /* Store the offset in the pointer. */
   *ptr = (void *)(uintptr_t)offset;

   assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
   ma->ptrs[ma->ptr_count++] = ptr;
}

#define anv_multialloc_add_size(_ma, _ptr, _size) \
   _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))

#define anv_multialloc_add(_ma, _ptr, _count) \
   anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)))

__attribute__((always_inline))
static inline void *
anv_multialloc_alloc(struct anv_multialloc *ma,
                     const VkAllocationCallbacks *alloc,
                     VkSystemAllocationScope scope)
{
   void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
   if (!ptr)
      return NULL;

   /* Fill out each of the pointers with their final value.
    *
    *   for (uint32_t i = 0; i < ma->ptr_count; i++)
    *      *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
    *
    * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
    * constant, GCC is incapable of figuring this out and unrolling the loop
    * so we have to give it a little help.
    */
   STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
#define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
   if ((_i) < ma->ptr_count) \
      *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
   _ANV_MULTIALLOC_UPDATE_POINTER(0);
   _ANV_MULTIALLOC_UPDATE_POINTER(1);
   _ANV_MULTIALLOC_UPDATE_POINTER(2);
   _ANV_MULTIALLOC_UPDATE_POINTER(3);
   _ANV_MULTIALLOC_UPDATE_POINTER(4);
   _ANV_MULTIALLOC_UPDATE_POINTER(5);
   _ANV_MULTIALLOC_UPDATE_POINTER(6);
   _ANV_MULTIALLOC_UPDATE_POINTER(7);
#undef _ANV_MULTIALLOC_UPDATE_POINTER

   return ptr;
}

__attribute__((always_inline))
static inline void *
anv_multialloc_alloc2(struct anv_multialloc *ma,
                      const VkAllocationCallbacks *parent_alloc,
                      const VkAllocationCallbacks *alloc,
                      VkSystemAllocationScope scope)
{
   return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
}

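/* Illustrative sketch (not part of the original header): allocating a
 * hypothetical struct foo with a trailing array of struct bar in a single
 * allocation.  All names here are made up for the example.
 *
 *    struct foo *foo;
 *    struct bar *bars;
 *
 *    ANV_MULTIALLOC(ma);
 *    anv_multialloc_add(&ma, &foo, 1);
 *    anv_multialloc_add(&ma, &bars, bar_count);
 *
 *    if (!anv_multialloc_alloc(&ma, &device->alloc,
 *                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 *
 *    foo->bars = bars;   // both pointers land in one vk_alloc'd block
 */
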
struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list.  This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset.  This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
   uint32_t flags;
};

static inline void
anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
{
   bo->gem_handle = gem_handle;
   bo->index = 0;
   bo->offset = -1;
   bo->size = size;
   bo->map = NULL;
   bo->flags = 0;
}

/* Represents a lock-free linked list of "free" things.  This is used by
 * both the block pool and the state pools.  Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      int32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })

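/* Illustrative sketch (not part of the original header): the count field
 * turns the head into a 64-bit (offset, generation) pair, so a pop is a
 * single 64-bit compare-and-swap.  A thread holding a stale head fails the
 * CAS even if the same offset has since been pushed back (the classic ABA
 * hazard), because the generation no longer matches.  The next-pointer
 * lookup below is a hypothetical placeholder.
 *
 *    union anv_free_list current, old, new;
 *    current.u64 = list->u64;
 *    for (;;) {
 *       old = current;
 *       new.offset = next_offset_of(old.offset);  // follow the link
 *       new.count = old.count + 1;                // bump the generation
 *       current.u64 = __sync_val_compare_and_swap(&list->u64,
 *                                                 old.u64, new.u64);
 *       if (current.u64 == old.u64)
 *          break;                                 // pop succeeded
 *    }
 */
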
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

struct anv_block_pool {
   struct anv_device *device;

   uint64_t bo_flags;

   struct anv_bo bo;

   /* The offset from the start of the bo to the "center" of the block
    * pool.  Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool.  This pointer may or may not
    * point to the actual beginning of the block pool memory.  If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    */
   void *map;
   int fd;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   struct anv_block_state state;

   struct anv_block_state back_state;
};

/* Block pools are backed by a fixed-size 1GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ul << 30)

/* The center of the block pool is also the middle of the memfd.  This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}

struct anv_state {
   int32_t offset;
   uint32_t alloc_size;
   void *map;
};

#define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })

struct anv_fixed_size_state_pool {
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 20

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)

struct anv_state_pool {
   struct anv_block_pool block_pool;

   /* The size of blocks which will be allocated from the block pool */
   uint32_t block_size;

   /** Free list for "back" allocations */
   union anv_free_list back_alloc_free_list;

   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_state_pool *state_pool;

   /* The size of blocks to allocate from the state pool */
   uint32_t block_size;

   /* Current block we're allocating from */
   struct anv_state block;

   /* Offset into the current block at which to allocate the next state */
   uint32_t next;

   /* List of all blocks allocated from this pool */
   struct anv_state_stream_block *block_list;
};

/* The block_pool functions are exported for testing only.  The block pool
 * should only be used via a state pool (see below).
 */
VkResult anv_block_pool_init(struct anv_block_pool *pool,
                             struct anv_device *device,
                             uint32_t initial_size,
                             uint64_t bo_flags);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
                             uint32_t block_size);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
                                  uint32_t block_size);

VkResult anv_state_pool_init(struct anv_state_pool *pool,
                             struct anv_device *device,
                             uint32_t block_size,
                             uint64_t bo_flags);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      uint32_t state_size, uint32_t alignment);
struct anv_state anv_state_pool_alloc_back(struct anv_state_pool *pool);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_state_pool *state_pool,
                           uint32_t block_size);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);

/**
 * Implements a pool of re-usable BOs.  The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   uint64_t bo_flags;

   void *free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
                      uint64_t bo_flags);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
                           uint32_t size);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);

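/* Illustrative sketch (not part of the original header): batch buffers are
 * typically carved out of a bo pool and returned when done.  Error handling
 * is elided and the size is made up.
 *
 *    struct anv_bo bo;
 *    VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, 4096);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    ... record commands into bo.map ...
 *    anv_bo_pool_free(&device->batch_bo_pool, &bo);
 */
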
struct anv_scratch_bo {
   bool exists;
   struct anv_bo bo;
};

struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_scratch_bo bos[16][MESA_SHADER_STAGES];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);

/** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
struct anv_bo_cache {
   struct hash_table *bo_map;
   pthread_mutex_t mutex;
};

VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
void anv_bo_cache_finish(struct anv_bo_cache *cache);
VkResult anv_bo_cache_alloc(struct anv_device *device,
                            struct anv_bo_cache *cache,
                            uint64_t size, struct anv_bo **bo);
VkResult anv_bo_cache_import(struct anv_device *device,
                             struct anv_bo_cache *cache,
                             int fd, struct anv_bo **bo);
VkResult anv_bo_cache_export(struct anv_device *device,
                             struct anv_bo_cache *cache,
                             struct anv_bo *bo_in, int *fd_out);
void anv_bo_cache_release(struct anv_device *device,
                          struct anv_bo_cache *cache,
                          struct anv_bo *bo);

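/* Illustrative sketch (not part of the original header): importing the same
 * underlying dma-buf twice should yield the same anv_bo, which is why the
 * cache keeps a GEM-handle-keyed hash table and refcounts.  Error handling
 * is elided and the behavior described is an assumption of this example.
 *
 *    struct anv_bo *bo1, *bo2;
 *    anv_bo_cache_import(device, &device->bo_cache, fd, &bo1);
 *    anv_bo_cache_import(device, &device->bo_cache, dup(fd), &bo2);
 *    assert(bo1 == bo2);   // same GEM handle, same anv_bo
 *
 *    anv_bo_cache_release(device, &device->bo_cache, bo2);
 *    anv_bo_cache_release(device, &device->bo_cache, bo1);
 */
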
struct anv_memory_type {
   /* Standard bits passed on to the client */
   VkMemoryPropertyFlags   propertyFlags;
   uint32_t                heapIndex;

   /* Driver-internal book-keeping */
   VkBufferUsageFlags      valid_buffer_usage;
};

struct anv_memory_heap {
   /* Standard bits passed on to the client */
   VkDeviceSize      size;
   VkMemoryHeapFlags flags;

   /* Driver-internal book-keeping */
   bool              supports_48bit_addresses;
};

struct anv_physical_device {
    VK_LOADER_DATA                              _loader_data;

    struct anv_instance *                       instance;
    uint32_t                                    chipset_id;
    char                                        path[20];
    const char *                                name;
    struct gen_device_info                      info;
    /** Amount of "GPU memory" we want to advertise
     *
     * Clearly, this value is bogus since Intel is a UMA architecture.  On
     * gen7 platforms, we are limited by GTT size unless we want to implement
     * fine-grained tracking and GTT splitting.  On Broadwell and above we are
     * practically unlimited.  However, we will never report more than 3/4 of
     * the total system RAM to try to avoid running out of RAM.
     */
    bool                                        supports_48bit_addresses;
    struct brw_compiler *                       compiler;
    struct isl_device                           isl_dev;
    int                                         cmd_parser_version;
    bool                                        has_exec_async;
    bool                                        has_exec_capture;
    bool                                        has_exec_fence;
    bool                                        has_syncobj;
    bool                                        has_syncobj_wait;

    struct anv_device_extension_table           supported_extensions;

    uint32_t                                    eu_total;
    uint32_t                                    subslice_total;

    struct {
      uint32_t                                  type_count;
      struct anv_memory_type                    types[VK_MAX_MEMORY_TYPES];
      uint32_t                                  heap_count;
      struct anv_memory_heap                    heaps[VK_MAX_MEMORY_HEAPS];
    } memory;

    uint8_t                                     pipeline_cache_uuid[VK_UUID_SIZE];
    uint8_t                                     driver_uuid[VK_UUID_SIZE];
    uint8_t                                     device_uuid[VK_UUID_SIZE];

    struct wsi_device                           wsi_device;
    int                                         local_fd;
};

struct anv_instance {
    VK_LOADER_DATA                              _loader_data;

    VkAllocationCallbacks                       alloc;

    uint32_t                                    apiVersion;
    struct anv_instance_extension_table         enabled_extensions;
    struct anv_dispatch_table                   dispatch;

    int                                         physicalDeviceCount;
    struct anv_physical_device                  physicalDevice;

    struct vk_debug_report_instance             debug_report_callbacks;
};

VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);

uint32_t anv_physical_device_api_version(struct anv_physical_device *dev);
bool anv_physical_device_extension_supported(struct anv_physical_device *dev,
                                             const char *name);

struct anv_queue {
    VK_LOADER_DATA                              _loader_data;

    struct anv_device *                         device;

    struct anv_state_pool *                     pool;
};

struct anv_pipeline_cache {
   struct anv_device *                          device;
   pthread_mutex_t                              mutex;

   struct hash_table *                          cache;
};

struct anv_pipeline_bind_map;

void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map);

struct anv_device {
    VK_LOADER_DATA                              _loader_data;

    VkAllocationCallbacks                       alloc;

    struct anv_instance *                       instance;
    uint32_t                                    chipset_id;
    struct gen_device_info                      info;
    struct isl_device                           isl_dev;
    int                                         context_id;
    int                                         fd;
    bool                                        can_chain_batches;
    bool                                        robust_buffer_access;
    struct anv_device_extension_table           enabled_extensions;
    struct anv_dispatch_table                   dispatch;

    struct anv_bo_pool                          batch_bo_pool;

    struct anv_bo_cache                         bo_cache;

    struct anv_state_pool                       dynamic_state_pool;
    struct anv_state_pool                       instruction_state_pool;
    struct anv_state_pool                       surface_state_pool;

    struct anv_bo                               workaround_bo;
    struct anv_bo                               trivial_batch_bo;

    struct anv_pipeline_cache                   blorp_shader_cache;
    struct blorp_context                        blorp;

    struct anv_state                            border_colors;

    struct anv_queue                            queue;

    struct anv_scratch_pool                     scratch_pool;

    uint32_t                                    default_mocs;

    pthread_mutex_t                             mutex;
    pthread_cond_t                              queue_submit;
    bool                                        lost;
};

static inline void
anv_state_flush(struct anv_device *device, struct anv_state state)
{
   if (device->info.has_llc)
      return;

   gen_flush_range(state.map, state.alloc_size);
}

void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);

VkResult anv_device_execbuf(struct anv_device *device,
                            struct drm_i915_gem_execbuffer2 *execbuf,
                            struct anv_bo **execbuf_bos);
VkResult anv_device_query_status(struct anv_device *device);
VkResult anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo);
VkResult anv_device_wait(struct anv_device *device, struct anv_bo *bo,
                         int64_t timeout);

void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, uint64_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_busy(struct anv_device *device, uint32_t gem_handle);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_context_param(int fd, int context, uint32_t param,
                              uint64_t *value);
int anv_gem_get_param(int fd, uint32_t param);
int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
bool anv_gem_supports_48b_addresses(int fd);
int anv_gem_gpu_get_reset_stats(struct anv_device *device,
                                uint32_t *active, uint32_t *pending);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);
int anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2);
uint32_t anv_gem_syncobj_create(struct anv_device *device, uint32_t flags);
void anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle);
int anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle);
uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_syncobj_export_sync_file(struct anv_device *device,
                                     uint32_t handle);
int anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                     uint32_t handle, int fd);
void anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle);
bool anv_gem_supports_syncobj_wait(int fd);
int anv_gem_syncobj_wait(struct anv_device *device,
                         uint32_t *handles, uint32_t num_handles,
                         int64_t abs_timeout_ns, bool wait_all);

VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);

struct anv_reloc_list {
   uint32_t                                     num_relocs;
   uint32_t                                     array_length;
   struct drm_i915_gem_relocation_entry *       relocs;
   struct anv_bo **                             reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

VkResult anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta);

struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head                             link;

   struct anv_bo                                bo;

   /* Bytes actually consumed in this batch BO */
   uint32_t                                     length;

   struct anv_reloc_list                        relocs;
};

struct anv_batch {
   const VkAllocationCallbacks *                alloc;

   void *                                       start;
   void *                                       end;
   void *                                       next;

   struct anv_reloc_list *                      relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *                                       user_data;

   /**
    * Current error status of the command buffer. Used to track inconsistent
    * or incomplete command buffer states that are the consequence of run-time
    * errors such as out of memory scenarios. We want to track this in the
    * batch because the command buffer object is not visible to some parts
    * of the driver.
    */
   VkResult                                     status;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);

static inline VkResult
anv_batch_set_error(struct anv_batch *batch, VkResult error)
{
   assert(error != VK_SUCCESS);
   if (batch->status == VK_SUCCESS)
      batch->status = error;
   return batch->status;
}

static inline bool
anv_batch_has_error(struct anv_batch *batch)
{
   return batch->status != VK_SUCCESS;
}

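/* Illustrative sketch (not part of the original header): entry points that
 * record commands typically bail out early once the batch is in an error
 * state, and record new failures with anv_batch_set_error().  The entry
 * point shown here is hypothetical.
 *
 *    void anv_CmdDoSomething(VkCommandBuffer commandBuffer, ...)
 *    {
 *       ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 *
 *       if (anv_batch_has_error(&cmd_buffer->batch))
 *          return;
 *       ...
 *    }
 */
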
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address

/* Wrapper macros needed to work around preprocessor argument issues.  In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num
#define anv_pack_struct(dst, struc, ...) do {                              \
      struct struc __template = {                                          \
         __VA_ARGS__                                                       \
      };                                                                   \
      __anv_cmd_pack(struc)(NULL, dst, &__template);                       \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)

#define anv_batch_emitn(batch, n, cmd, ...) ({             \
      void *__dst = anv_batch_emit_dwords(batch, n);       \
      if (__dst) {                                         \
         struct cmd __template = {                         \
            __anv_cmd_header(cmd),                         \
            .DWordLength = n - __anv_cmd_length_bias(cmd), \
            __VA_ARGS__                                    \
         };                                                \
         __anv_cmd_pack(cmd)(batch, __dst, &__template);   \
      }                                                    \
      __dst;                                               \
   })

#define anv_batch_emit_merge(batch, dwords0, dwords1)                   \
   do {                                                                 \
      uint32_t *dw;                                                     \
                                                                        \
      STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1));        \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));         \
      if (!dw)                                                          \
         break;                                                         \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)                \
         dw[i] = (dwords0)[i] | (dwords1)[i];                           \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));   \
   } while (0)

#define anv_batch_emit(batch, cmd, name)                                \
   for (struct cmd name = { __anv_cmd_header(cmd) },                    \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd));    \
        __builtin_expect(_dst != NULL, 1);                              \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                     \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL;                                                 \
         }))

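/* Illustrative sketch (not part of the original header): anv_batch_emit is
 * used as a one-iteration for loop whose body fills in the template, which
 * is packed into the batch when the loop exits.  PIPE_CONTROL is a real
 * command; the field chosen here is just an example.
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 */
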
#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) {  \
   .GraphicsDataTypeGFDT                        = 0,           \
   .LLCCacheabilityControlLLCCC                 = 0,           \
   .L3CacheabilityControlL3CC                   = 1,           \
}

#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) {  \
   .LLCeLLCCacheabilityControlLLCCC             = 0,           \
   .L3CacheabilityControlL3CC                   = 1,           \
}

#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) {  \
      .MemoryTypeLLCeLLCCacheabilityControl = WB,              \
      .TargetCache = L3DefertoPATforLLCeLLCselection,          \
      .AgeforQUADLRU = 0                                       \
   }

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */

#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) {  \
      /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */              \
      .IndextoMOCSTables                           = 2         \
   }

#define GEN9_MOCS_PTE {                                 \
      /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */       \
      .IndextoMOCSTables                           = 1  \
   }

/* Cannonlake MOCS defines are duplicates of Skylake MOCS defines. */
#define GEN10_MOCS (struct GEN10_MEMORY_OBJECT_CONTROL_STATE) {  \
      /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */              \
      .IndextoMOCSTables                           = 2         \
   }

#define GEN10_MOCS_PTE {                                 \
      /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */       \
      .IndextoMOCSTables                           = 1  \
   }

struct anv_device_memory {
   struct anv_bo *                              bo;
   struct anv_memory_type *                     type;
   VkDeviceSize                                 map_size;
   void *                                       map;
};

/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t Reserved;
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
   float PointWidth;
};

struct anv_descriptor_set_binding_layout {
#ifndef NDEBUG
   /* The type of the descriptors in this binding */
   VkDescriptorType type;
#endif

   /* Number of array elements in this binding */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_index;

   struct {
      /* Index into the binding table for the associated surface */
      int16_t surface_index;

      /* Index into the sampler table for the associated sampler */
      int16_t sampler_index;

      /* Index into the image table for the associated image */
      int16_t image_index;
   } stage[MESA_SHADER_STAGES];

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};

struct anv_descriptor_set_layout {
   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffers in this descriptor set */
   uint16_t buffer_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};

struct anv_descriptor {
   VkDescriptorType type;

   union {
      struct {
         VkImageLayout layout;
         struct anv_image_view *image_view;
         struct anv_sampler *sampler;
      };

      struct {
         struct anv_buffer *buffer;
         uint64_t offset;
         uint64_t range;
      };

      struct anv_buffer_view *buffer_view;
   };
};

struct anv_descriptor_set {
   const struct anv_descriptor_set_layout *layout;
   uint32_t size;
   uint32_t buffer_count;
   struct anv_buffer_view *buffer_views;
   struct anv_descriptor descriptors[0];
};

struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_state surface_state;
   struct anv_state storage_surface_state;
   struct anv_state writeonly_storage_surface_state;

   struct brw_image_param storage_image_param;
};

struct anv_push_descriptor_set {
   struct anv_descriptor_set set;

   /* Put this field right behind anv_descriptor_set so it fills up the
    * descriptors[0] field. */
   struct anv_descriptor descriptors[MAX_PUSH_DESCRIPTORS];
   struct anv_buffer_view buffer_views[MAX_PUSH_DESCRIPTORS];
};

struct anv_descriptor_pool {
   uint32_t size;
   uint32_t next;
   uint32_t free_list;

   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

   char data[0];
};

enum anv_descriptor_template_entry_type {
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE,
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER,
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
};

struct anv_descriptor_template_entry {
   /* The type of descriptor in this entry */
   VkDescriptorType type;

   /* Binding in the descriptor set */
   uint32_t binding;

   /* Offset at which to write into the descriptor set binding */
   uint32_t array_element;

   /* Number of elements to write into the descriptor set binding */
   uint32_t array_count;

   /* Offset into the user-provided data */
   size_t offset;

   /* Stride between elements in the user-provided data */
   size_t stride;
1306 };
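
/* A sketch (assumption, not driver code) of how an entry addresses element i
 * in the user-provided blob passed to vkUpdateDescriptorSetWithTemplateKHR():
 *
 *    const char *base = (const char *)data;
 *    const VkDescriptorImageInfo *info =
 *       (const VkDescriptorImageInfo *)(base + entry->offset +
 *                                       i * entry->stride);
 */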

struct anv_descriptor_update_template {
   VkPipelineBindPoint bind_point;

   /* The descriptor set this template corresponds to. This value is only
    * valid if the template was created with the templateType
    * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR.
    */
   uint8_t set;

   /* Number of entries in this template */
   uint32_t entry_count;

   /* Entries of the template */
   struct anv_descriptor_template_entry entries[0];
};

size_t
anv_descriptor_set_binding_layout_get_hw_size(const struct anv_descriptor_set_binding_layout *binding);

size_t
anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout);

void
anv_descriptor_set_write_image_view(struct anv_descriptor_set *set,
                                    const struct gen_device_info * const devinfo,
                                    const VkDescriptorImageInfo * const info,
                                    VkDescriptorType type,
                                    uint32_t binding,
                                    uint32_t element);

void
anv_descriptor_set_write_buffer_view(struct anv_descriptor_set *set,
                                     VkDescriptorType type,
                                     struct anv_buffer_view *buffer_view,
                                     uint32_t binding,
                                     uint32_t element);

void
anv_descriptor_set_write_buffer(struct anv_descriptor_set *set,
                                struct anv_device *device,
                                struct anv_state_stream *alloc_stream,
                                VkDescriptorType type,
                                struct anv_buffer *buffer,
                                uint32_t binding,
                                uint32_t element,
                                VkDeviceSize offset,
                                VkDeviceSize range);

void
anv_descriptor_set_write_template(struct anv_descriptor_set *set,
                                  struct anv_device *device,
                                  struct anv_state_stream *alloc_stream,
                                  const struct anv_descriptor_update_template *template,
                                  const void *data);

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);

#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX

struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to.  The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */
   uint8_t set;

   /* Binding in the descriptor set */
   uint32_t binding;

   /* Index in the binding */
   uint32_t index;

   /* Plane in the binding index */
   uint8_t plane;

   /* Input attachment index (relative to the subpass) */
   uint8_t input_attachment_index;

   /* For a storage image, whether it is write-only */
   bool write_only;
};

struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      bool has_dynamic_offsets;
   } stage[MESA_SHADER_STAGES];

   unsigned char sha1[20];
};

struct anv_buffer {
   struct anv_device *                          device;
   VkDeviceSize                                 size;

   VkBufferUsageFlags                           usage;

   /* Set when bound */
   struct anv_bo *                              bo;
   VkDeviceSize                                 offset;
};

static inline uint64_t
anv_buffer_get_range(struct anv_buffer *buffer, uint64_t offset, uint64_t range)
{
   assert(offset <= buffer->size);
   if (range == VK_WHOLE_SIZE) {
      return buffer->size - offset;
   } else {
      assert(range <= buffer->size);
      return range;
   }
}
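
/* Illustrative caller (sketch, not driver code): resolving the size actually
 * bound by a VkDescriptorBufferInfo, whose range may be VK_WHOLE_SIZE:
 *
 *    uint64_t bound = anv_buffer_get_range(buffer, pInfo->offset,
 *                                          pInfo->range);
 */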

enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT                  = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR                   = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH                = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS                = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS           = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS              = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK      = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK        = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE         = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_DYNAMIC_ALL                       = (1 << 9) - 1,
   ANV_CMD_DIRTY_PIPELINE                          = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER                      = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS                    = 1 << 11,
};
typedef uint32_t anv_cmd_dirty_mask_t;

enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT            = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT          = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT       = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT    = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT          = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT             = (1 << 5),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT     = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT    = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT                  = (1 << 13),
   ANV_PIPE_CS_STALL_BIT                     = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL.  Instead it means that
    * a flush has happened but not a CS stall.  The next time we do any sort
    * of invalidation we need to insert a CS stall at that time.  Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT               = (1 << 21),
};

#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
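
/* A minimal sketch of the deferred CS-stall rule described above (modeled on
 * the comment for ANV_PIPE_NEEDS_CS_STALL_BIT; not a verbatim copy of the
 * driver's PIPE_CONTROL emission path):
 *
 *    enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;
 *
 *    if (bits & ANV_PIPE_FLUSH_BITS)
 *       bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;
 *
 *    if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
 *        (bits & ANV_PIPE_NEEDS_CS_STALL_BIT))
 *       bits |= ANV_PIPE_CS_STALL_BIT;
 */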

static inline enum anv_pipe_bits
anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags)
{
   enum anv_pipe_bits pipe_bits = 0;

   unsigned b;
   for_each_bit(b, flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_SHADER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_TRANSFER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   return pipe_bits;
}

static inline enum anv_pipe_bits
anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags)
{
   enum anv_pipe_bits pipe_bits = 0;

   unsigned b;
   for_each_bit(b, flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
      case VK_ACCESS_INDEX_READ_BIT:
      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_UNIFORM_READ_BIT:
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_SHADER_READ_BIT:
      case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
      case VK_ACCESS_TRANSFER_READ_BIT:
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   return pipe_bits;
}
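
/* Sketch of a barrier-style caller (illustrative, not the driver's actual
 * vkCmdPipelineBarrier path) combining both helpers:
 *
 *    cmd_buffer->state.pending_pipe_bits |=
 *       anv_pipe_flush_bits_for_access_flags(srcAccessMask) |
 *       anv_pipe_invalidate_bits_for_access_flags(dstAccessMask);
 */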

#define VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV (         \
   VK_IMAGE_ASPECT_COLOR_BIT | \
   VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
   VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
   VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
#define VK_IMAGE_ASPECT_PLANES_BITS_ANV ( \
   VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
   VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
   VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)

struct anv_vertex_binding {
   struct anv_buffer *                          buffer;
   VkDeviceSize                                 offset;
};

#define ANV_PARAM_PUSH(offset)         ((1 << 16) | (uint32_t)(offset))
#define ANV_PARAM_PUSH_OFFSET(param)   ((param) & 0xffff)
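
/* These encode a 16-bit push-constant byte offset with a tag in the upper
 * bits; the round trip looks like (sketch):
 *
 *    uint32_t param = ANV_PARAM_PUSH(64);            // (1 << 16) | 64
 *    uint32_t off   = ANV_PARAM_PUSH_OFFSET(param);  // 64 again
 */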

struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */
   uint32_t size;

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_IMAGES];
};
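
/* Since only the used prefix of the structure is allocated, the size needed
 * by a stage with N image params would be (sketch, assuming the layout
 * above):
 *
 *    size_t sz = offsetof(struct anv_push_constants, images) +
 *                N * sizeof(struct brw_image_param);
 */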

struct anv_dynamic_state {
   struct {
      uint32_t                                  count;
      VkViewport                                viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t                                  count;
      VkRect2D                                  scissors[MAX_SCISSORS];
   } scissor;

   float                                        line_width;

   struct {
      float                                     bias;
      float                                     clamp;
      float                                     slope;
   } depth_bias;

   float                                        blend_constants[4];

   struct {
      float                                     min;
      float                                     max;
   } depth_bounds;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_compare_mask;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_write_mask;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_reference;
};

extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);

struct anv_surface_state {
   struct anv_state state;
   /** Address of the surface referred to by this state
    *
    * This address is relative to the start of the BO.
    */
   uint64_t address;
   /** Address of the aux surface, if any
    *
    * This field is 0 if and only if no aux surface exists.
    *
    * This address is relative to the start of the BO.  On gen7, the bottom 12
    * bits of this address include extra aux information.
    */
   uint64_t aux_address;
};

/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   enum isl_aux_usage                           aux_usage;
   enum isl_aux_usage                           input_aux_usage;
   struct anv_surface_state                     color;
   struct anv_surface_state                     input;

   VkImageLayout                                current_layout;
   VkImageAspectFlags                           pending_clear_aspects;
   bool                                         fast_clear;
   VkClearValue                                 clear_value;
   bool                                         clear_color_is_zero_one;
   bool                                         clear_color_is_zero;
};

/** State tracking for particular pipeline bind point
 *
 * This struct is the base struct for anv_cmd_graphics_state and
 * anv_cmd_compute_state.  These are used to track state which is bound to a
 * particular type of pipeline.  Generic state that applies per-stage such as
 * binding table offsets and push constants is tracked generically with a
 * per-stage array in anv_cmd_state.
 */
struct anv_cmd_pipeline_state {
   struct anv_pipeline *pipeline;

   struct anv_descriptor_set *descriptors[MAX_SETS];
   uint32_t dynamic_offsets[MAX_DYNAMIC_BUFFERS];

   struct anv_push_descriptor_set *push_descriptors[MAX_SETS];
};

/** State tracking for graphics pipeline
 *
 * This has anv_cmd_pipeline_state as a base struct to track things which get
 * bound to a graphics pipeline.  Along with general pipeline bind point state
 * which is in the anv_cmd_pipeline_state base struct, it also contains other
 * state which is graphics-specific.
 */
struct anv_cmd_graphics_state {
   struct anv_cmd_pipeline_state base;

   anv_cmd_dirty_mask_t dirty;
   uint32_t vb_dirty;

   struct anv_dynamic_state dynamic;

   struct {
      struct anv_buffer *index_buffer;
      uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
      uint32_t index_offset;
   } gen7;
};

/** State tracking for compute pipeline
 *
 * This has anv_cmd_pipeline_state as a base struct to track things which get
 * bound to a compute pipeline.  Along with general pipeline bind point state
 * which is in the anv_cmd_pipeline_state base struct, it also contains other
 * state which is compute-specific.
 */
struct anv_cmd_compute_state {
   struct anv_cmd_pipeline_state base;

   bool pipeline_dirty;

   struct anv_address num_workgroups;
};

/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t                                     current_pipeline;
   const struct gen_l3_config *                 current_l3_config;

   struct anv_cmd_graphics_state                gfx;
   struct anv_cmd_compute_state                 compute;

   enum anv_pipe_bits                           pending_pipe_bits;
   VkShaderStageFlags                           descriptors_dirty;
   VkShaderStageFlags                           push_constants_dirty;

   struct anv_framebuffer *                     framebuffer;
   struct anv_render_pass *                     pass;
   struct anv_subpass *                         subpass;
   VkRect2D                                     render_area;
   uint32_t                                     restart_index;
   struct anv_vertex_binding                    vertex_bindings[MAX_VBS];
   VkShaderStageFlags                           push_constant_stages;
   struct anv_push_constants *                  push_constants[MESA_SHADER_STAGES];
   struct anv_state                             binding_tables[MESA_SHADER_STAGES];
   struct anv_state                             samplers[MESA_SHADER_STAGES];

   /**
    * Whether or not the gen8 PMA fix is enabled.  We ensure that it is
    * disabled at the top of any command buffer by disabling it in
    * EndCommandBuffer and before invoking a secondary in ExecuteCommands.
    */
   bool                                         pma_fix_enabled;

   /**
    * Whether or not we know for certain that HiZ is enabled for the current
    * subpass.  If, for whatever reason, we are unsure as to whether HiZ is
    * enabled or not, this will be false.
    */
   bool                                         hiz_enabled;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state *                attachments;

   /**
    * Surface states for color render targets.  These are stored in a single
    * flat array.  For depth-stencil attachments, the surface state is simply
    * left blank.
    */
   struct anv_state                             render_pass_states;

   /**
    * A null surface state of the right size to match the framebuffer.  This
    * is one of the states in render_pass_states.
    */
   struct anv_state                             null_surface_state;
};

struct anv_cmd_pool {
   VkAllocationCallbacks                        alloc;
   struct list_head                             cmd_buffers;
};

#define ANV_CMD_BUFFER_BATCH_SIZE 8192

enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};

struct anv_cmd_buffer {
   VK_LOADER_DATA                               _loader_data;

   struct anv_device *                          device;

   struct anv_cmd_pool *                        pool;
   struct list_head                             pool_link;

   struct anv_batch                             batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head                             batch_bos;
   enum anv_cmd_buffer_exec_mode                exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector                              seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector                              bt_block_states;
   uint32_t                                     bt_next;

   struct anv_reloc_list                        surface_relocs;
   /** Last seen surface state block pool center bo offset */
   uint32_t                                     last_ss_pool_center;

   /* Serial for tracking buffer completion */
   uint32_t                                     serial;

   /* Stream objects for storing temporary data */
   struct anv_state_stream                      surface_state_stream;
   struct anv_state_stream                      dynamic_state_stream;

   VkCommandBufferUsageFlags                    usage_flags;
   VkCommandBufferLevel                         level;

   struct anv_cmd_state                         state;
};

VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
                                struct anv_cmd_buffer *cmd_buffer,
                                const VkSemaphore *in_semaphores,
                                uint32_t num_in_semaphores,
                                const VkSemaphore *out_semaphores,
                                uint32_t num_out_semaphores,
                                VkFence fence);

VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size);
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
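
/* Illustrative use (sketch; `i` and `param` are hypothetical locals): grow
 * the fragment-stage push constants so the image params array exists before
 * writing it:
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
 *                                              MESA_SHADER_FRAGMENT, images);
 *    cmd_buffer->state.push_constants[MESA_SHADER_FRAGMENT]->images[i] = param;
 */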

struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);

void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                      struct anv_render_pass *pass,
                                      struct anv_framebuffer *framebuffer,
                                      const VkClearValue *clear_values);

void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);

const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);

enum anv_fence_type {
   ANV_FENCE_TYPE_NONE = 0,
   ANV_FENCE_TYPE_BO,
   ANV_FENCE_TYPE_SYNCOBJ,
};

enum anv_bo_fence_state {
   /** Indicates that this is a new (or newly reset) fence */
   ANV_BO_FENCE_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   ANV_BO_FENCE_STATE_SUBMITTED,

   ANV_BO_FENCE_STATE_SIGNALED,
};
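
/* Sketch of the state transitions implied above (an illustration based on
 * the enum comments, not a verbatim copy of the driver's fence code):
 *
 *    vkCreateFence / vkResetFences : state = ANV_BO_FENCE_STATE_RESET;
 *    vkQueueSubmit                 : state = ANV_BO_FENCE_STATE_SUBMITTED;
 *    successful wait on the BO     : state = ANV_BO_FENCE_STATE_SIGNALED;
 */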

struct anv_fence_impl {
   enum anv_fence_type type;

   union {
      /** Fence implementation for BO fences
       *
       * These fences use a BO and a set of CPU-tracked state flags.  The BO
       * is added to the object list of the last execbuf call in a QueueSubmit
       * and is marked EXEC_OBJECT_WRITE.  The state flags track when the BO
       * has been submitted to the kernel.  We need to do this because Vulkan
       * lets you wait on a fence that has not yet been submitted and
       * I915_GEM_BUSY will say it's idle in this case.
       */
      struct {
         struct anv_bo bo;
         enum anv_bo_fence_state state;
      } bo;

      /** DRM syncobj handle for syncobj-based fences */
      uint32_t syncobj;
   };
};

struct anv_fence {
   /* Permanent fence state.  Every fence has some form of permanent state
    * (type != ANV_FENCE_TYPE_NONE).  This may be a BO to fence on (for
    * cross-process fences) or it could just be a dummy for use internally.
    */
   struct anv_fence_impl permanent;

   /* Temporary fence state.  A fence *may* have temporary state.  That state
    * is added to the fence by an import operation and is reset back to
    * ANV_FENCE_TYPE_NONE when the fence is reset.  A fence with temporary
    * state cannot be signaled because the fence must already be signaled
    * before the temporary state can be exported from the fence in the other
    * process and imported here.
    */
   struct anv_fence_impl temporary;
};

struct anv_event {
   uint64_t                                     semaphore;
   struct anv_state                             state;
};

enum anv_semaphore_type {
   ANV_SEMAPHORE_TYPE_NONE = 0,
   ANV_SEMAPHORE_TYPE_DUMMY,
   ANV_SEMAPHORE_TYPE_BO,
   ANV_SEMAPHORE_TYPE_SYNC_FILE,
   ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
};

struct anv_semaphore_impl {
   enum anv_semaphore_type type;

   union {
      /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO.
       * This BO will be added to the object list on any execbuf2 calls for
       * which this semaphore is used as a wait or signal fence.  When used as
       * a signal fence, the EXEC_OBJECT_WRITE flag will be set.
       */
      struct anv_bo *bo;

      /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
       * If the semaphore is in the unsignaled state due to either just being
       * created or because it has been used for a wait, fd will be -1.
       */
      int fd;

      /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
       * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
       * import so we don't need to bother with a userspace cache.
       */
      uint32_t syncobj;
   };
};

struct anv_semaphore {
   /* Permanent semaphore state.  Every semaphore has some form of permanent
    * state (type != ANV_SEMAPHORE_TYPE_NONE).  This may be a BO to fence on
    * (for cross-process semaphores) or it could just be a dummy for use
    * internally.
    */
   struct anv_semaphore_impl permanent;

   /* Temporary semaphore state.  A semaphore *may* have temporary state.
    * That state is added to the semaphore by an import operation and is reset
    * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on.  A
    * semaphore with temporary state cannot be signaled because the semaphore
    * must already be signaled before the temporary state can be exported from
    * the semaphore in the other process and imported here.
    */
   struct anv_semaphore_impl temporary;
};

void anv_semaphore_reset_temporary(struct anv_device *device,
                                   struct anv_semaphore *semaphore);

struct anv_shader_module {
   unsigned char                                sha1[20];
   uint32_t                                     size;
   char                                         data[0];
};

static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
        __tmp &= ~(1 << (stage)))
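
/* Illustrative usage (sketch): visit each stage selected by a
 * VkShaderStageFlags mask, relying on ANV_STAGE_MASK to drop unknown bits:
 *
 *    anv_foreach_stage(s, VK_SHADER_STAGE_VERTEX_BIT |
 *                         VK_SHADER_STAGE_FRAGMENT_BIT) {
 *       cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(s);
 *    }
 */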

struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *                surface_to_descriptor;
   struct anv_pipeline_binding *                sampler_to_descriptor;
};

struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};

struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct anv_pipeline_bind_map bind_map;
};

struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   p_atomic_inc(&shader->ref_cnt);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   if (p_atomic_dec_zero(&shader->ref_cnt))
      anv_shader_bin_destroy(device, shader);
}
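
/* Reference-counting sketch (illustrative, not driver code): a pipeline that
 * shares a cached shader takes a reference and drops it on teardown:
 *
 *    anv_shader_bin_ref(bin);
 *    pipeline->shaders[stage] = bin;
 *    ...
 *    anv_shader_bin_unref(device, pipeline->shaders[stage]);
 */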

struct anv_pipeline {
   struct anv_device *                          device;
   struct anv_batch                             batch;
   uint32_t                                     batch_data[512];
   struct anv_reloc_list                        batch_relocs;
   uint32_t                                     dynamic_state_mask;
   struct anv_dynamic_state                     dynamic_state;

   struct anv_subpass *                         subpass;
   struct anv_pipeline_layout *                 layout;

   bool                                         needs_data_cache;

   struct anv_shader_bin *                      shaders[MESA_SHADER_STAGES];

   struct {
      const struct gen_l3_config *              l3_config;
      uint32_t                                  total_size;
   } urb;

   VkShaderStageFlags                           active_stages;
   struct anv_state                             blend_state;

   uint32_t                                     vb_used;
   uint32_t                                     binding_stride[MAX_VBS];
   bool                                         instancing_enable[MAX_VBS];
   bool                                         primitive_restart;
   uint32_t                                     topology;

   uint32_t                                     cs_right_mask;

   bool                                         writes_depth;
   bool                                         depth_test_enable;
   bool                                         writes_stencil;
   bool                                         stencil_test_enable;
   bool                                         depth_clamp_enable;
   bool                                         sample_shading_enable;
   bool                                         kill_pixel;

   struct {
      uint32_t                                  sf[7];
      uint32_t                                  depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t                                  sf[4];
      uint32_t                                  raster[5];
      uint32_t                                  wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t                                  wm_depth_stencil[4];
   } gen9;

   uint32_t                                     interface_descriptor_data[8];
};

static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
static inline const struct brw_##prefix##_prog_data *                \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline)        \
{                                                                    \
   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
      return (const struct brw_##prefix##_prog_data *)               \
             pipeline->shaders[stage]->prog_data;                    \
   } else {                                                          \
      return NULL;                                                   \
   }                                                                 \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)

static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}
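
/* The generated getters return NULL when the stage is absent, so callers
 * guard on the active stage first.  Sketch (illustrative):
 *
 *    if (anv_pipeline_has_stage(pipeline, MESA_SHADER_COMPUTE)) {
 *       const struct brw_cs_prog_data *cs_prog_data =
 *          get_cs_prog_data(pipeline);
 *       ...
 *    }
 */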

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);

struct anv_format_plane {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;

   /* Whether this plane contains chroma channels */
   bool has_chroma;

   /* For downscaling of YUV planes */
   uint8_t denominator_scales[2];

   /* How to map sampled ycbcr planes to a single 4 component element. */
   struct isl_swizzle ycbcr_swizzle;
};

struct anv_format {
   struct anv_format_plane planes[3];
   uint8_t n_planes;
   bool can_ycbcr;
};

static inline uint32_t
anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
                          VkImageAspectFlags aspect_mask)
{
   switch (aspect_mask) {
   case VK_IMAGE_ASPECT_COLOR_BIT:
   case VK_IMAGE_ASPECT_DEPTH_BIT:
   case VK_IMAGE_ASPECT_PLANE_0_BIT_KHR:
      return 0;
   case VK_IMAGE_ASPECT_STENCIL_BIT:
      if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
         return 0;
      /* Fall-through */
   case VK_IMAGE_ASPECT_PLANE_1_BIT_KHR:
      return 1;
   case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR:
      return 2;
   default:
      /* Purposefully assert with depth/stencil aspects. */
      unreachable("invalid image aspect");
   }
}

static inline uint32_t
anv_image_aspect_get_planes(VkImageAspectFlags aspect_mask)
{
   uint32_t planes = 0;

   if (aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT |
                      VK_IMAGE_ASPECT_DEPTH_BIT |
                      VK_IMAGE_ASPECT_STENCIL_BIT |
                      VK_IMAGE_ASPECT_PLANE_0_BIT_KHR))
      planes++;
   if (aspect_mask & VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)
      planes++;
   if (aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
      planes++;

   if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0 &&
       (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0)
      planes++;

   return planes;
}

static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
                    uint32_t plane)
{
   if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
      if (_mesa_bitcount(image_aspects) > 1)
         return VK_IMAGE_ASPECT_PLANE_0_BIT_KHR << plane;
      return VK_IMAGE_ASPECT_COLOR_BIT;
   }
   if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
      return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
   assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
   return VK_IMAGE_ASPECT_STENCIL_BIT;
}

#define anv_foreach_image_aspect_bit(b, image, aspects) \
   for_each_bit(b, anv_image_expand_aspects(image, aspects))
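
/* Illustrative usage (sketch): iterate each aspect bit of a subresource
 * range, with VK_IMAGE_ASPECT_COLOR_BIT expanded to per-plane aspects for
 * multi-planar images:
 *
 *    uint32_t b;
 *    anv_foreach_image_aspect_bit(b, image, range->aspectMask) {
 *       uint32_t plane = anv_image_aspect_to_plane(image->aspects, 1u << b);
 *       ...
 *    }
 */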

const struct anv_format *
anv_get_format(VkFormat format);

static inline uint32_t
anv_get_format_planes(VkFormat vk_format)
{
   const struct anv_format *format = anv_get_format(vk_format);

   return format != NULL ? format->n_planes : 0;
}

struct anv_format_plane
anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
                     VkImageAspectFlagBits aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
}

static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one.  We do this to fake
    * RGB as RGBA for texturing.
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);

/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};

struct anv_image {
   VkImageType type;
   /* The original VkFormat provided by the client.  This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   const struct anv_format *format;

   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   uint32_t n_planes;
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */

   /**
    * DRM format modifier for this image or DRM_FORMAT_MOD_INVALID.
    */
   uint64_t drm_format_mod;

   VkDeviceSize size;
   uint32_t alignment;

   /* Whether the image is made of several underlying buffer objects rather
    * than a single one with different offsets.
    */
   bool disjoint;

   /**
    * Image subsurfaces
    *
    * For each plane x, anv_image::planes[x].surface is valid if and only if
    * anv_image::aspects contains the corresponding aspect. Refer to
    * anv_image_aspect_to_plane() to figure the plane number associated with
    * a given aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces.  From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage.  To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    *
    * Memory layout:
    *
    * -----------------------
    * |     surface0        |   /|\
    * -----------------------    |
    * |   shadow surface0   |    |
    * -----------------------    | Plane 0
    * |    aux surface0     |    |
    * -----------------------    |
    * | fast clear colors0  |   \|/
    * -----------------------
    * |     surface1        |   /|\
    * -----------------------    |
    * |   shadow surface1   |    |
    * -----------------------    | Plane 1
    * |    aux surface1     |    |
    * -----------------------    |
    * | fast clear colors1  |   \|/
    * -----------------------
    * |        ...          |
    * |                     |
    * -----------------------
    */
   struct {
      /**
       * Offset of the entire plane (whenever the image is disjoint this is
       * set to 0).
       */
      uint32_t offset;

      VkDeviceSize size;
      uint32_t alignment;

      struct anv_surface surface;

      /**
       * A surface which shadows the main surface and may have different
       * tiling. This is used for sampling using a tiling that isn't supported
       * for other operations.
       */
      struct anv_surface shadow_surface;

      /**
       * For color images, this is the aux usage for this image when not used
       * as a color attachment.
       *
       * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the
       * image has a HiZ buffer.
       */
      enum isl_aux_usage aux_usage;

      struct anv_surface aux_surface;

      /**
       * Offset of the fast clear state (used to compute the
       * fast_clear_state_offset of the following planes).
       */
      uint32_t fast_clear_state_offset;

      /**
       * BO associated with this plane, set when bound.
       */
      struct anv_bo *bo;
      VkDeviceSize bo_offset;

      /**
       * When destroying the image, also free the bo.
       */
      bool bo_is_owned;
   } planes[3];
};

/* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return image->planes[plane].aux_surface.isl.size > 0 ?
          image->planes[plane].aux_surface.isl.levels : 0;
}

/* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect,
                     const uint8_t miplevel)
{
   assert(image);

   /* The miplevel must exist in the main buffer. */
   assert(miplevel < image->levels);

   if (miplevel >= anv_image_aux_levels(image, aspect)) {
      /* There are no layers with auxiliary data because the miplevel has no
       * auxiliary data.
       */
      return 0;
   } else {
      uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
      return MAX2(image->planes[plane].aux_surface.isl.logical_level0_px.array_len,
                  image->planes[plane].aux_surface.isl.logical_level0_px.depth >> miplevel);
   }
}

static inline unsigned
anv_fast_clear_state_entry_size(const struct anv_device *device)
{
   assert(device);
   /* Entry contents:
    *   +--------------------------------------------+
    *   | clear value dword(s) | needs resolve dword |
    *   +--------------------------------------------+
    */

   /* Ensure that the needs resolve dword is in fact dword-aligned to enable
    * GPU memcpy operations.
    */
   assert(device->isl_dev.ss.clear_value_size % 4 == 0);
   return device->isl_dev.ss.clear_value_size + 4;
}
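
/* Worked example (the 16-byte clear_value_size is an assumption for
 * illustration only): each level's entry then spans 20 bytes, so for level N
 * the clear color lives at
 *
 *    fast_clear_state_offset + 20 * N
 *
 * and its needs-resolve dword immediately follows at +16, matching the two
 * address helpers below.
 */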

static inline struct anv_address
anv_image_get_clear_color_addr(const struct anv_device *device,
                               const struct anv_image *image,
                               VkImageAspectFlagBits aspect,
                               unsigned level)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return (struct anv_address) {
      .bo = image->planes[plane].bo,
      .offset = image->planes[plane].bo_offset +
                image->planes[plane].fast_clear_state_offset +
                anv_fast_clear_state_entry_size(device) * level,
   };
}

static inline struct anv_address
anv_image_get_needs_resolve_addr(const struct anv_device *device,
                                 const struct anv_image *image,
                                 VkImageAspectFlagBits aspect,
                                 unsigned level)
{
   struct anv_address addr =
      anv_image_get_clear_color_addr(device, image, aspect, level);
   addr.offset += device->isl_dev.ss.clear_value_size;
   return addr;
}

/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const struct anv_image *image)
{
   if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
      return false;

   if (devinfo->gen < 8)
      return false;

   return image->samples == 1;
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op);
void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
                const struct anv_image * const image,
                VkImageAspectFlagBits aspect,
                const uint8_t level,
                const uint32_t start_layer, const uint32_t layer_count,
                const enum blorp_fast_clear_op op);

void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
                     const struct anv_image *image,
                     VkImageAspectFlagBits aspect,
                     const uint32_t base_level, const uint32_t level_count,
                     const uint32_t base_layer, uint32_t layer_count);

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count);

enum isl_aux_usage
anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageLayout layout);

/* This is defined as a macro so that it works for both
 * VkImageSubresourceRange and VkImageSubresourceLayers
 */
#define anv_get_layerCount(_image, _range) \
   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)
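
/* Illustrative usage (sketch; `range` and `layers` are hypothetical locals)
 * with either sibling type, which is why this is a macro rather than a
 * function:
 *
 *    VkImageSubresourceRange  range  = ...;
 *    VkImageSubresourceLayers layers = ...;
 *    uint32_t n1 = anv_get_layerCount(image, &range);
 *    uint32_t n2 = anv_get_layerCount(image, &layers);
 */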

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}

static inline VkImageAspectFlags
anv_image_expand_aspects(const struct anv_image *image,
                         VkImageAspectFlags aspects)
{
   /* If the underlying image has color plane aspects and
    * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
    * the underlying image. */
   if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS_ANV) != 0 &&
       aspects == VK_IMAGE_ASPECT_COLOR_BIT)
      return image->aspects;

   return aspects;
}

static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
                             VkImageAspectFlags aspects2)
{
   if (aspects1 == aspects2)
      return true;

   /* Color aspects are only compatible with other color aspect sets that
    * contain the same number of aspects.
    */
   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       _mesa_bitcount(aspects1) == _mesa_bitcount(aspects2))
      return true;

   return false;
}

struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   unsigned n_planes;
   struct {
      uint32_t image_plane;

      struct isl_view isl;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of SHADER_READ_ONLY_OPTIMAL or
       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
       */
      struct anv_surface_state optimal_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of GENERAL.
       */
      struct anv_surface_state general_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a storage image. Separate
       * states for write-only and readable, using the real format for
       * write-only and the lowered format for readable.
       */
      struct anv_surface_state storage_surface_state;
      struct anv_surface_state writeonly_storage_surface_state;

      struct brw_image_param storage_image_param;
   } planes[3];
};

enum anv_image_view_state_flags {
   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY   = (1 << 0),
   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL      = (1 << 1),
};

void anv_image_fill_surface_state(struct anv_device *device,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  const struct isl_view *view,
                                  isl_surf_usage_flags_t view_usage,
                                  enum isl_aux_usage aux_usage,
                                  const union isl_color_value *clear_color,
                                  enum anv_image_view_state_flags flags,
                                  struct anv_surface_state *state_inout,
                                  struct brw_image_param *image_param_out);

struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask that restricts the ISL tilings mapped from the
    *  Vulkan tiling to the tilings set here.
    */
   isl_tiling_flags_t isl_tiling_flags;

   /** These flags will be added to those derived from VkImageCreateInfo. */
   isl_surf_usage_flags_t isl_extra_usage_flags;

   uint32_t stride;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

#ifdef ANDROID
VkResult anv_image_from_gralloc(VkDevice device_h,
                                const VkImageCreateInfo *base_info,
                                const VkNativeBufferANDROID *gralloc_info,
                                const VkAllocationCallbacks *alloc,
                                VkImage *pImage);
#endif

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);

static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
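
/* Illustrative behavior of the two sanitizers above: for a VK_IMAGE_TYPE_1D
 * image, an extent of { 64, 32, 8 } becomes { 64, 1, 1 } and an offset of
 * { 4, 2, 1 } becomes { 4, 0, 0 }; VK_IMAGE_TYPE_2D collapses only the
 * z/depth component, and VK_IMAGE_TYPE_3D passes both through untouched.
 */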

void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);

struct anv_ycbcr_conversion {
   const struct anv_format *        format;
   VkSamplerYcbcrModelConversionKHR ycbcr_model;
   VkSamplerYcbcrRangeKHR           ycbcr_range;
   VkComponentSwizzle               mapping[4];
   VkChromaLocationKHR              chroma_offsets[2];
   VkFilter                         chroma_filter;
   bool                             chroma_reconstruction;
};

struct anv_sampler {
   uint32_t                     state[3][4];
   uint32_t                     n_planes;
   struct anv_ycbcr_conversion *conversion;
};

struct anv_framebuffer {
   uint32_t                                     width;
   uint32_t                                     height;
   uint32_t                                     layers;

   uint32_t                                     attachment_count;
   struct anv_image_view *                      attachments[0];
};

struct anv_subpass {
   uint32_t                                     attachment_count;

   /**
    * A pointer to all attachment references used in this subpass.
    * Only valid if ::attachment_count > 0.
    */
   VkAttachmentReference *                      attachments;
   uint32_t                                     input_count;
   VkAttachmentReference *                      input_attachments;
   uint32_t                                     color_count;
   VkAttachmentReference *                      color_attachments;
   VkAttachmentReference *                      resolve_attachments;

   VkAttachmentReference                        depth_stencil_attachment;

   uint32_t                                     view_mask;

   /** Subpass has a depth/stencil self-dependency */
   bool                                         has_ds_self_dep;

   /** Subpass has at least one resolve attachment */
   bool                                         has_resolve;
};

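/* A view_mask of 0 means the subpass is not multiview; it still renders a
 * single view, hence the MAX2 with 1 below.
 */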
static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
   return MAX2(1, _mesa_bitcount(subpass->view_mask));
}

struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkFormat                                     format;
   uint32_t                                     samples;
   VkImageUsageFlags                            usage;
   VkAttachmentLoadOp                           load_op;
   VkAttachmentStoreOp                          store_op;
   VkAttachmentLoadOp                           stencil_load_op;
   VkImageLayout                                initial_layout;
   VkImageLayout                                final_layout;
   VkImageLayout                                first_subpass_layout;

   /* The index of the last subpass in which this attachment is used. */
   uint32_t                                     last_subpass_idx;
};

struct anv_render_pass {
   uint32_t                                     attachment_count;
   uint32_t                                     subpass_count;
   /* An array of subpass_count+1 flushes, one per subpass boundary */
   enum anv_pipe_bits *                         subpass_flushes;
   struct anv_render_pass_attachment *          attachments;
   struct anv_subpass                           subpasses[0];
};

#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff

struct anv_query_pool {
   VkQueryType                                  type;
   VkQueryPipelineStatisticFlags                pipeline_statistics;
   /** Stride between slots, in bytes */
   uint32_t                                     stride;
   /** Number of slots in this query pool */
   uint32_t                                     slots;
   struct anv_bo                                bo;
};
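
/* Sketch of the implied pool layout (an assumption from the fields above,
 * not a stated contract): the results for slot `i` start at byte offset
 * i * stride within `bo`, for i in [0, slots).
 */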

int anv_get_entrypoint_index(const char *name);

bool
anv_entrypoint_is_enabled(int index, uint32_t core_version,
                          const struct anv_instance_extension_table *instance,
                          const struct anv_device_extension_table *device);

void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);

enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);

static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);
   return subpass_id;
}

#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)                      \
                                                                           \
   static inline struct __anv_type *                                       \
   __anv_type ## _from_handle(__VkType _handle)                            \
   {                                                                       \
      return (struct __anv_type *) _handle;                                \
   }                                                                       \
                                                                           \
   static inline __VkType                                                  \
   __anv_type ## _to_handle(struct __anv_type *_obj)                       \
   {                                                                       \
      return (__VkType) _obj;                                              \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)              \
                                                                           \
   static inline struct __anv_type *                                       \
   __anv_type ## _from_handle(__VkType _handle)                            \
   {                                                                       \
      return (struct __anv_type *)(uintptr_t) _handle;                     \
   }                                                                       \
                                                                           \
   static inline __VkType                                                  \
   __anv_type ## _to_handle(struct __anv_type *_obj)                       \
   {                                                                       \
      return (__VkType)(uintptr_t) _obj;                                   \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
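
/* Typical usage at the top of a Vulkan entrypoint (illustrative only;
 * anv_SomeEntrypoint is a hypothetical name):
 *
 *    VkResult anv_SomeEntrypoint(VkDevice _device, VkImage _image)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ANV_FROM_HANDLE(anv_image, image, _image);
 *
 *       // `device` is now a struct anv_device * and `image` a
 *       // struct anv_image *, unwrapped from the opaque Vulkan handles.
 *       ...
 *    }
 */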

ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplateKHR)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, VkSemaphore)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
ANV_DEFINE_NONDISP_HANDLE_CASTS(vk_debug_report_callback, VkDebugReportCallbackEXT)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, VkSamplerYcbcrConversionKHR)

/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen10_##x
#  include "anv_genX.h"
#  undef genX
#endif
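
/* Illustrative expansion (hypothetical function name): when genX is already
 * defined by a per-generation compilation unit, genX(foo) resolves to a
 * single symbol such as gen9_foo; otherwise the block above stamps out
 * gen7_foo, gen75_foo, gen8_foo, gen9_foo and gen10_foo declarations from
 * the same anv_genX.h.
 */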

#endif /* ANV_PRIVATE_H */