/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
23 #ifndef VK_UTIL_H
24 #define VK_UTIL_H
25 
#include "util/bitscan.h"
#include "util/macros.h"
#include "compiler/shader_enums.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
31 
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35 
36 /* common inlines and macros for vulkan drivers */
37 
38 #include <vulkan/vulkan.h>
39 
/* Iterator over a Vulkan pNext chain.  In debug builds it also carries a
 * second, half-speed pointer ("tortoise and hare") used to detect loops in
 * the chain.
 */
struct vk_pnext_iterator {
   /* Current position in the chain. */
   VkBaseOutStructure *pos;
#ifndef NDEBUG
   /* Trails `pos` at half speed; equality with `pos` implies a loop. */
   VkBaseOutStructure *half_pos;
   /* Number of advances so far; `half_pos` moves only on odd steps. */
   unsigned idx;
#endif
   /* Lets the vk_foreach_struct() macros run their outer for-loop once. */
   bool done;
};
48 
49 static inline struct vk_pnext_iterator
vk_pnext_iterator_init(void * start)50 vk_pnext_iterator_init(void *start)
51 {
52    struct vk_pnext_iterator iter;
53 
54    iter.pos = (VkBaseOutStructure *)start;
55 #ifndef NDEBUG
56    iter.half_pos = (VkBaseOutStructure *)start;
57    iter.idx = 0;
58 #endif
59    iter.done = false;
60 
61    return iter;
62 }
63 
64 static inline struct vk_pnext_iterator
vk_pnext_iterator_init_const(const void * start)65 vk_pnext_iterator_init_const(const void *start)
66 {
67    return vk_pnext_iterator_init((void *)start);
68 }
69 
/* Advances the iterator one structure down the pNext chain and returns the
 * new position (NULL at the end of the chain).  Debug builds additionally
 * check for loops in the chain and assert if one is found. */
static inline VkBaseOutStructure *
vk_pnext_iterator_next(struct vk_pnext_iterator *iter)
{
   iter->pos = iter->pos->pNext;

#ifndef NDEBUG
   if (iter->idx++ & 1) {
      /** This is the "tortoise and the hare" algorithm.  We increment
       * chaser->pNext every other time *iter gets incremented.  Because *iter
       * is incrementing twice as fast as chaser->pNext, the distance between
       * them in the list increases by one for each time we get here.  If we
       * have a loop, eventually, both iterators will be inside the loop and
       * this distance will be an integer multiple of the loop length, at
       * which point the two pointers will be equal.
       */
      iter->half_pos = iter->half_pos->pNext;
      if (iter->half_pos == iter->pos)
         assert(!"Vulkan input pNext chain has a loop!");
   }
#endif

   return iter->pos;
}
93 
/* Iterate (as __e, a VkBaseOutStructure *) over every structure in the pNext
 * chain starting at __start, including __start itself.
 *
 * Because the outer loop only executes once, independently of what happens in
 * the inner loop, breaks and continues should work exactly the same as if
 * there were only one for loop.
 */
#define vk_foreach_struct(__e, __start) \
   for (struct vk_pnext_iterator __iter = vk_pnext_iterator_init(__start); \
        !__iter.done; __iter.done = true) \
      for (VkBaseOutStructure *__e = __iter.pos; \
           __e; __e = vk_pnext_iterator_next(&__iter))
103 
/* Const variant of vk_foreach_struct(): __e is a const VkBaseInStructure *.
 * The same break/continue semantics apply. */
#define vk_foreach_struct_const(__e, __start) \
   for (struct vk_pnext_iterator __iter = \
            vk_pnext_iterator_init_const(__start); \
        !__iter.done; __iter.done = true) \
      for (const VkBaseInStructure *__e = (VkBaseInStructure *)__iter.pos; \
           __e; __e = (VkBaseInStructure *)vk_pnext_iterator_next(&__iter))
110 
111 
/**
 * A wrapper for a Vulkan output array. A Vulkan output array is one that
 * follows the convention of the parameters to
 * vkGetPhysicalDeviceQueueFamilyProperties().
 *
 * Example Usage:
 *
 *    VkResult
 *    vkGetPhysicalDeviceQueueFamilyProperties(
 *       VkPhysicalDevice           physicalDevice,
 *       uint32_t*                  pQueueFamilyPropertyCount,
 *       VkQueueFamilyProperties*   pQueueFamilyProperties)
 *    {
 *       VK_OUTARRAY_MAKE_TYPED(VkQueueFamilyProperties, props,
 *                              pQueueFamilyProperties,
 *                              pQueueFamilyPropertyCount);
 *
 *       vk_outarray_append_typed(VkQueueFamilyProperties, &props, p) {
 *          p->queueFlags = ...;
 *          p->queueCount = ...;
 *       }
 *
 *       vk_outarray_append_typed(VkQueueFamilyProperties, &props, p) {
 *          p->queueFlags = ...;
 *          p->queueCount = ...;
 *       }
 *
 *       return vk_outarray_status(&props);
 *    }
 */
struct __vk_outarray {
   /** May be null. */
   void *data;

   /**
    * Capacity, in number of elements. Capacity is unlimited (UINT32_MAX) if
    * data is null.
    */
   uint32_t cap;

   /**
    * Count of elements successfully written to the array. Every write is
    * considered successful if data is null.
    */
   uint32_t *filled_len;

   /**
    * Count of elements that would have been written to the array if its
    * capacity were sufficient. Vulkan functions often return VK_INCOMPLETE
    * when `*filled_len < wanted_len`.
    */
   uint32_t wanted_len;
};
165 
166 static inline void
__vk_outarray_init(struct __vk_outarray * a,void * data,uint32_t * restrict len)167 __vk_outarray_init(struct __vk_outarray *a,
168                    void *data, uint32_t *restrict len)
169 {
170    a->data = data;
171    a->cap = *len;
172    a->filled_len = len;
173    *a->filled_len = 0;
174    a->wanted_len = 0;
175 
176    if (a->data == NULL)
177       a->cap = UINT32_MAX;
178 }
179 
180 static inline VkResult
__vk_outarray_status(const struct __vk_outarray * a)181 __vk_outarray_status(const struct __vk_outarray *a)
182 {
183    if (*a->filled_len < a->wanted_len)
184       return VK_INCOMPLETE;
185    else
186       return VK_SUCCESS;
187 }
188 
189 static inline void *
__vk_outarray_next(struct __vk_outarray * a,size_t elem_size)190 __vk_outarray_next(struct __vk_outarray *a, size_t elem_size)
191 {
192    void *p = NULL;
193 
194    a->wanted_len += 1;
195 
196    if (*a->filled_len >= a->cap)
197       return NULL;
198 
199    if (a->data != NULL)
200       p = (uint8_t *)a->data + (*a->filled_len) * elem_size;
201 
202    *a->filled_len += 1;
203 
204    return p;
205 }
206 
/* Declares an anonymous wrapper type around __vk_outarray for elements of
 * type elem_t.  The flexible `meta` member occupies no storage; it exists
 * only so the element type can be recovered via __typeof__/sizeof below. */
#define vk_outarray(elem_t) \
   struct { \
      struct __vk_outarray base; \
      elem_t meta[]; \
   }

/* Element type / element size of an outarray, recovered from `meta`. */
#define vk_outarray_typeof_elem(a) __typeof__((a)->meta[0])
#define vk_outarray_sizeof_elem(a) sizeof((a)->meta[0])

/* Typed front-end for __vk_outarray_init(); `len` is reset to 0. */
#define vk_outarray_init(a, data, len) \
   __vk_outarray_init(&(a)->base, (data), (len))

/* Declares and initializes an outarray `name` in one go. */
#define VK_OUTARRAY_MAKE_TYPED(type, name, data, len) \
   vk_outarray(type) name; \
   vk_outarray_init(&name, (data), (len))

/* VK_SUCCESS, or VK_INCOMPLETE if any append overflowed the capacity. */
#define vk_outarray_status(a) \
   __vk_outarray_status(&(a)->base)

/* Reserves the next element; NULL when full or in query mode. */
#define vk_outarray_next(a) \
   vk_outarray_next_typed(vk_outarray_typeof_elem(a), a)
#define vk_outarray_next_typed(type, a) \
   ((type *) \
      __vk_outarray_next(&(a)->base, vk_outarray_sizeof_elem(a)))
231 
/**
 * Append to a Vulkan output array.
 *
 * This is a block-based macro. For example:
 *
 *    vk_outarray_append_typed(T, &a, elem) {
 *       elem->foo = ...;
 *       elem->bar = ...;
 *    }
 *
 * The array `a` has type `vk_outarray(elem_t) *`. It is usually declared with
 * VK_OUTARRAY_MAKE_TYPED(). The variable `elem` is block-scoped and has type
 * `elem_t *`.
 *
 * The macro unconditionally increments the array's `wanted_len`. If the array
 * is not full, then the macro also increments its `filled_len` and then
 * executes the block. When the block is executed, `elem` is non-null and
 * points to the newly appended element.
 */
#define vk_outarray_append_typed(type, a, elem) \
   for (type *elem = vk_outarray_next_typed(type, a); \
        elem != NULL; elem = NULL)
254 
255 static inline void *
__vk_find_struct(void * start,VkStructureType sType)256 __vk_find_struct(void *start, VkStructureType sType)
257 {
258    vk_foreach_struct(s, start) {
259       if (s->sType == sType)
260          return s;
261    }
262 
263    return NULL;
264 }
265 
/* Finds a structure by the short sType name, e.g.
 * vk_find_struct(pNext, SHADER_MODULE_CREATE_INFO). */
#define vk_find_struct(__start, __sType) \
   __vk_find_struct((__start), VK_STRUCTURE_TYPE_##__sType)

/* Const variant of vk_find_struct(); const is cast away internally and
 * restored on the result. */
#define vk_find_struct_const(__start, __sType) \
   (const void *)__vk_find_struct((void *)(__start), VK_STRUCTURE_TYPE_##__sType)
271 
272 static inline void
__vk_append_struct(void * start,void * element)273 __vk_append_struct(void *start, void *element)
274 {
275    vk_foreach_struct(s, start) {
276       if (s->pNext)
277          continue;
278 
279       s->pNext = (struct VkBaseOutStructure *) element;
280       break;
281    }
282 }
283 
/* Out-of-line helpers; definitions are not visible in this header. */

/* Returns the driver's version number (encoding defined at the
 * implementation site — see the corresponding .c file). */
uint32_t vk_get_driver_version(void);

/* Returns an override for the advertised Vulkan version, presumably taken
 * from the environment; 0 likely means "no override" — verify at the
 * definition. */
uint32_t vk_get_version_override(void);

/* Emits a warning that `driver_name` is not a conformant Vulkan
 * implementation. */
void vk_warn_non_conformant_implementation(const char *driver_name);
289 
/* Layout of the header at the start of VkPipelineCache blob data, matching
 * the layout mandated by the Vulkan spec for vkGetPipelineCacheData(). */
struct vk_pipeline_cache_header {
   uint32_t header_size;    /* Length of this header, in bytes. */
   uint32_t header_version; /* A VkPipelineCacheHeaderVersion value. */
   uint32_t vendor_id;      /* VkPhysicalDeviceProperties::vendorID. */
   uint32_t device_id;      /* VkPhysicalDeviceProperties::deviceID. */
   uint8_t  uuid[VK_UUID_SIZE]; /* Device's pipelineCacheUUID. */
};
297 
/* Extension-provided enum values are encoded by the Vulkan registry as
 *    1000000000 + (extension_index * 1000) + offset
 * where extension_index is zero-based. */
#define VK_EXT_OFFSET (1000000000UL)

/* 1-based number of the extension an enum value came from; 0 for core. */
#define VK_ENUM_EXTENSION(__e) \
   ((__e) < VK_EXT_OFFSET ? 0 : ((((__e) - VK_EXT_OFFSET) / 1000UL) + 1))

/* Offset of the value within its extension block; core values pass through. */
#define VK_ENUM_OFFSET(__e) \
   ((__e) < VK_EXT_OFFSET ? (__e) : ((__e) % 1000))
303 
/* memcpy() `count` elements from `src` to `dest`, statically asserting that
 * source and destination element sizes match — a cheap guard against copying
 * between incompatible types.  STATIC_ASSERT comes from util/macros.h. */
#define typed_memcpy(dest, src, count) do { \
   STATIC_ASSERT(sizeof(*(src)) == sizeof(*(dest))); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
} while (0)
308 
309 static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)310 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
311 {
312    assert(util_bitcount((uint32_t) vk_stage) == 1);
313    return (gl_shader_stage) (ffs((uint32_t) vk_stage) - 1);
314 }
315 
316 static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)317 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
318 {
319    return (VkShaderStageFlagBits) (1 << ((uint32_t) mesa_stage));
320 }
321 
/* Iterate over a sequence of indexed multidraws for the VK_EXT_multi_draw
 * extension.  `_i` must be declared by the caller (typically initialized to
 * 0 — the macro does not initialize it); `_stride` is the byte distance
 * between consecutive draw records. */
#define vk_foreach_multi_draw_indexed(_draw, _i, _pDrawInfo, _num_draws, _stride) \
   for (const VkMultiDrawIndexedInfoEXT *_draw = (const void*)(_pDrawInfo); \
        (_i) < (_num_draws); \
        (_i)++, (_draw) = (const VkMultiDrawIndexedInfoEXT*)((const uint8_t*)(_draw) + (_stride)))

/* Non-indexed variant of vk_foreach_multi_draw_indexed(); same rules for
 * `_i` and `_stride`. */
#define vk_foreach_multi_draw(_draw, _i, _pDrawInfo, _num_draws, _stride) \
   for (const VkMultiDrawInfoEXT *_draw = (const void*)(_pDrawInfo); \
        (_i) < (_num_draws); \
        (_i)++, (_draw) = (const VkMultiDrawInfoEXT*)((const uint8_t*)(_draw) + (_stride)))
335 
336 
struct nir_spirv_specialization;

/* Converts a VkSpecializationInfo into an array of
 * nir_spirv_specialization, storing the element count in
 * *out_num_spec_entries.  NOTE(review): ownership of the returned array is
 * not documented here — presumably the caller frees it; verify at the
 * definition. */
struct nir_spirv_specialization*
vk_spec_info_to_nir_spirv(const VkSpecializationInfo *spec_info,
                          uint32_t *out_num_spec_entries);
342 
/* Number of elements kept in the stack-allocated backing store of a
 * STACK_ARRAY before falling back to the heap. */
#define STACK_ARRAY_SIZE 8

/* C++ rejects `{0}` for some aggregate types; C (pre-C23) rejects `{}`. */
#ifdef __cplusplus
#define STACK_ARRAY_ZERO_INIT {}
#else
#define STACK_ARRAY_ZERO_INIT {0}
#endif

/* Declares `name`, an array of `size` elements of `type`.  Small arrays
 * (size <= STACK_ARRAY_SIZE) live on the stack and are zero-initialized;
 * larger ones are malloc'ed and NOT zero-initialized.  Must be paired with
 * STACK_ARRAY_FINISH() on every exit path. */
#define STACK_ARRAY(type, name, size) \
   type _stack_##name[STACK_ARRAY_SIZE] = STACK_ARRAY_ZERO_INIT; \
   type *const name = \
     ((size) <= STACK_ARRAY_SIZE ? _stack_##name : (type *)malloc((size) * sizeof(type)))

/* Releases the heap allocation made by STACK_ARRAY(), if any.  Wrapped in
 * do/while(0) so the macro is a single statement: the previous bare `if`
 * would capture a following `else` (dangling-else hazard). */
#define STACK_ARRAY_FINISH(name) \
   do { \
      if (name != _stack_##name) \
         free(name); \
   } while (0)
358 
359 #ifdef __cplusplus
360 }
361 #endif
362 
363 #endif /* VK_UTIL_H */
364