• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 #ifndef VK_UTIL_H
24 #define VK_UTIL_H
25 
26 #include "util/macros.h"
27 
28 #include <stdlib.h>
29 #include <string.h>
30 
31 #if USE_VK_COMPILER
32 #include "vk_util_compiler.h"
33 #else
34 #include <stdbool.h>
35 #endif
36 
37 #include "vk_struct_type_cast.h"
38 
39 #ifdef __cplusplus
40 extern "C" {
41 #endif
42 
43 /* common inlines and macros for vulkan drivers */
44 
45 #include <vulkan/vulkan_core.h>
46 
/* State for walking a Vulkan pNext chain; used by vk_foreach_struct(). */
struct vk_pnext_iterator {
   /* Current structure in the chain. */
   VkBaseOutStructure *pos;
#ifndef NDEBUG
   /* Half-speed pointer and step counter for the debug-only
    * "tortoise and hare" cycle detection in vk_pnext_iterator_next().
    */
   VkBaseOutStructure *half_pos;
   unsigned idx;
#endif
   /* Makes the outer dummy loop in vk_foreach_struct() run exactly once. */
   bool done;
};
55 
56 static inline struct vk_pnext_iterator
vk_pnext_iterator_init(void * start)57 vk_pnext_iterator_init(void *start)
58 {
59    struct vk_pnext_iterator iter;
60 
61    iter.pos = (VkBaseOutStructure *)start;
62 #ifndef NDEBUG
63    iter.half_pos = (VkBaseOutStructure *)start;
64    iter.idx = 0;
65 #endif
66    iter.done = false;
67 
68    return iter;
69 }
70 
71 static inline struct vk_pnext_iterator
vk_pnext_iterator_init_const(const void * start)72 vk_pnext_iterator_init_const(const void *start)
73 {
74    return vk_pnext_iterator_init((void *)start);
75 }
76 
/* Advance the iterator to the next structure in the pNext chain and
 * return it (NULL at the end of the chain).  Debug builds additionally
 * assert that the chain is not circular.
 */
static inline VkBaseOutStructure *
vk_pnext_iterator_next(struct vk_pnext_iterator *iter)
{
   iter->pos = iter->pos->pNext;

#ifndef NDEBUG
   if (iter->idx++ & 1) {
      /** This is the "tortoise and the hare" algorithm.  We increment
       * chaser->pNext every other time *iter gets incremented.  Because *iter
       * is incrementing twice as fast as chaser->pNext, the distance between
       * them in the list increases by one for each time we get here.  If we
       * have a loop, eventually, both iterators will be inside the loop and
       * this distance will be an integer multiple of the loop length, at
       * which point the two pointers will be equal.
       */
      iter->half_pos = iter->half_pos->pNext;
      if (iter->half_pos == iter->pos)
         assert(!"Vulkan input pNext chain has a loop!");
   }
#endif

   return iter->pos;
}
100 
/* Iterate a mutable pNext chain, binding each structure to __e as a
 * VkBaseOutStructure pointer.
 *
 * Because the outer loop only executes once, independently of what happens in
 * the inner loop, breaks and continues should work exactly the same as if
 * there were only one for loop.
 */
#define vk_foreach_struct(__e, __start) \
   for (struct vk_pnext_iterator __iter = vk_pnext_iterator_init(__start); \
        !__iter.done; __iter.done = true) \
      for (VkBaseOutStructure *__e = __iter.pos; \
           __e; __e = vk_pnext_iterator_next(&__iter))

/* Same as vk_foreach_struct(), but __e is a const VkBaseInStructure
 * pointer, for read-only walks of input chains.
 */
#define vk_foreach_struct_const(__e, __start) \
   for (struct vk_pnext_iterator __iter = \
            vk_pnext_iterator_init_const(__start); \
        !__iter.done; __iter.done = true) \
      for (const VkBaseInStructure *__e = (VkBaseInStructure *)__iter.pos; \
           __e; __e = (VkBaseInStructure *)vk_pnext_iterator_next(&__iter))
117 
118 static inline void
vk_copy_struct_guts(VkBaseOutStructure * dst,VkBaseInStructure * src,size_t struct_size)119 vk_copy_struct_guts(VkBaseOutStructure *dst, VkBaseInStructure *src, size_t struct_size)
120 {
121    STATIC_ASSERT(sizeof(*dst) == sizeof(*src));
122    memcpy(dst + 1, src + 1, struct_size - sizeof(VkBaseOutStructure));
123 }
124 
125 /**
126  * A wrapper for a Vulkan output array. A Vulkan output array is one that
127  * follows the convention of the parameters to
128  * vkGetPhysicalDeviceQueueFamilyProperties().
129  *
130  * Example Usage:
131  *
132  *    VkResult
133  *    vkGetPhysicalDeviceQueueFamilyProperties(
134  *       VkPhysicalDevice           physicalDevice,
135  *       uint32_t*                  pQueueFamilyPropertyCount,
136  *       VkQueueFamilyProperties*   pQueueFamilyProperties)
137  *    {
138  *       VK_OUTARRAY_MAKE_TYPED(VkQueueFamilyProperties, props,
139  *                              pQueueFamilyProperties,
140  *                              pQueueFamilyPropertyCount);
141  *
142  *       vk_outarray_append_typed(VkQueueFamilyProperties, &props, p) {
143  *          p->queueFlags = ...;
144  *          p->queueCount = ...;
145  *       }
146  *
147  *       vk_outarray_append_typed(VkQueueFamilyProperties, &props, p) {
148  *          p->queueFlags = ...;
149  *          p->queueCount = ...;
150  *       }
151  *
152  *       return vk_outarray_status(&props);
153  *    }
154  */
struct __vk_outarray {
   /** Caller-provided element buffer.  May be null, in which case only
    * the element count is produced (the standard Vulkan two-call idiom).
    */
   void *data;

   /**
    * Capacity, in number of elements. Capacity is unlimited (UINT32_MAX) if
    * data is null.
    */
   uint32_t cap;

   /**
    * Count of elements successfully written to the array. Every write is
    * considered successful if data is null.  Points at the caller's count
    * parameter (e.g. *pQueueFamilyPropertyCount), which is updated in place.
    */
   uint32_t *filled_len;

   /**
    * Count of elements that would have been written to the array if its
    * capacity were sufficient. Vulkan functions often return VK_INCOMPLETE
    * when `*filled_len < wanted_len`.
    */
   uint32_t wanted_len;
};
178 
179 static inline void
__vk_outarray_init(struct __vk_outarray * a,void * data,uint32_t * restrict len)180 __vk_outarray_init(struct __vk_outarray *a,
181                    void *data, uint32_t *restrict len)
182 {
183    a->data = data;
184    a->cap = *len;
185    a->filled_len = len;
186    *a->filled_len = 0;
187    a->wanted_len = 0;
188 
189    if (a->data == NULL)
190       a->cap = UINT32_MAX;
191 }
192 
193 static inline VkResult
__vk_outarray_status(const struct __vk_outarray * a)194 __vk_outarray_status(const struct __vk_outarray *a)
195 {
196    if (*a->filled_len < a->wanted_len)
197       return VK_INCOMPLETE;
198    else
199       return VK_SUCCESS;
200 }
201 
202 static inline void *
__vk_outarray_next(struct __vk_outarray * a,size_t elem_size)203 __vk_outarray_next(struct __vk_outarray *a, size_t elem_size)
204 {
205    void *p = NULL;
206 
207    a->wanted_len += 1;
208 
209    if (*a->filled_len >= a->cap)
210       return NULL;
211 
212    if (a->data != NULL)
213       p = (uint8_t *)a->data + (*a->filled_len) * elem_size;
214 
215    *a->filled_len += 1;
216 
217    return p;
218 }
219 
/* Anonymous out-array wrapper type.  The flexible `meta` member exists
 * only so the element type can be recovered with __typeof__/sizeof; no
 * storage for it is ever allocated.
 */
#define vk_outarray(elem_t) \
   struct { \
      struct __vk_outarray base; \
      elem_t meta[]; \
   }

/* Element type / element size of an out-array, recovered from `meta`. */
#define vk_outarray_typeof_elem(a) __typeof__((a)->meta[0])
#define vk_outarray_sizeof_elem(a) sizeof((a)->meta[0])

#define vk_outarray_init(a, data, len) \
   __vk_outarray_init(&(a)->base, (data), (len))

/* Declare and initialize an out-array in one step. */
#define VK_OUTARRAY_MAKE_TYPED(type, name, data, len) \
   vk_outarray(type) name; \
   vk_outarray_init(&name, (data), (len))

/* VK_SUCCESS, or VK_INCOMPLETE if the caller's buffer was too small. */
#define vk_outarray_status(a) \
   __vk_outarray_status(&(a)->base)

/* Reserve the next element; NULL when full or when data is null. */
#define vk_outarray_next(a) \
   vk_outarray_next_typed(vk_outarray_typeof_elem(a), a)
#define vk_outarray_next_typed(type, a) \
   ((type *) \
      __vk_outarray_next(&(a)->base, vk_outarray_sizeof_elem(a)))
244 
/**
 * Append to a Vulkan output array.
 *
 * This is a block-based macro. For example:
 *
 *    vk_outarray_append_typed(T, &a, elem) {
 *       elem->foo = ...;
 *       elem->bar = ...;
 *    }
 *
 * The array `a` has type `vk_outarray(elem_t) *`. It is usually declared with
 * VK_OUTARRAY_MAKE_TYPED(). The variable `elem` is block-scoped and has type
 * `elem_t *`.
 *
 * The macro unconditionally increments the array's `wanted_len`. If the array
 * is not full, then the macro also increments its `filled_len` and then
 * executes the block. When the block is executed, `elem` is non-null and
 * points to the newly appended element.
 */
#define vk_outarray_append_typed(type, a, elem) \
   for (type *elem = vk_outarray_next_typed(type, a); \
        elem != NULL; elem = NULL)
267 
268 static inline void *
__vk_find_struct(void * start,VkStructureType sType)269 __vk_find_struct(void *start, VkStructureType sType)
270 {
271    vk_foreach_struct(s, start) {
272       if (s->sType == sType)
273          return s;
274    }
275 
276    return NULL;
277 }
278 
/* Find the structure with sType VK_STRUCTURE_TYPE_##__sType in the pNext
 * chain at __start, cast to its concrete C type (the *_cast typedefs come
 * from vk_struct_type_cast.h).  Evaluates to NULL when not chained.
 */
#define vk_find_struct(__start, __sType)                                       \
  (VK_STRUCTURE_TYPE_##__sType##_cast *)__vk_find_struct(                      \
      (__start), VK_STRUCTURE_TYPE_##__sType)

/* Const variant of vk_find_struct() for read-only input chains. */
#define vk_find_struct_const(__start, __sType)                                 \
  (const VK_STRUCTURE_TYPE_##__sType##_cast *)__vk_find_struct(                \
      (void *)(__start), VK_STRUCTURE_TYPE_##__sType)
286 
287 static inline void
__vk_append_struct(void * start,void * element)288 __vk_append_struct(void *start, void *element)
289 {
290    vk_foreach_struct(s, start) {
291       if (s->pNext)
292          continue;
293 
294       s->pNext = (struct VkBaseOutStructure *) element;
295       break;
296    }
297 }
298 
/* Packed driver version number to advertise; implementation not visible
 * here — see vk_util.c for the encoding.
 */
uint32_t vk_get_driver_version(void);

/* Override for the advertised Vulkan API version (presumably from an
 * environment variable; returns 0 when unset — verify in vk_util.c).
 */
uint32_t vk_get_version_override(void);

/* Emit a warning that `driver_name` is not a conformant Vulkan
 * implementation.
 */
void vk_warn_non_conformant_implementation(const char *driver_name);
304 
/* Header at the start of vkGetPipelineCacheData() blobs, matching the
 * layout mandated by the Vulkan specification.
 */
struct vk_pipeline_cache_header {
   uint32_t header_size;     /* Size of this header, in bytes. */
   uint32_t header_version;  /* VkPipelineCacheHeaderVersion value. */
   uint32_t vendor_id;       /* VkPhysicalDeviceProperties::vendorID. */
   uint32_t device_id;       /* VkPhysicalDeviceProperties::deviceID. */
   uint8_t  uuid[VK_UUID_SIZE]; /* Device's pipelineCacheUUID. */
};
312 
/* First enum value reserved for extensions; extension N's values occupy
 * the 1000-value block starting at VK_EXT_OFFSET + (N - 1) * 1000, per
 * the Vulkan registry numbering scheme.
 */
#define VK_EXT_OFFSET (1000000000UL)
/* 1-based extension number of an enum value, or 0 for core values. */
#define VK_ENUM_EXTENSION(__enum) \
   ((__enum) >= VK_EXT_OFFSET ? ((((__enum) - VK_EXT_OFFSET) / 1000UL) + 1) : 0)
/* Offset of an enum value within its extension's 1000-value block
 * (core values are returned unchanged).
 */
#define VK_ENUM_OFFSET(__enum) \
   ((__enum) >= VK_EXT_OFFSET ? ((__enum) % 1000) : (__enum))

/* memcpy of `count` elements that statically checks src and dest have
 * the same element size.
 */
#define typed_memcpy(dest, src, count) do { \
   STATIC_ASSERT(sizeof(*(src)) == sizeof(*(dest))); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
} while (0)
323 
/* Iterate over a sequence of indexed multidraws for the VK_EXT_multi_draw
 * extension.  The index variable `_i` must be declared (and initialized)
 * by the caller; `_stride` is the byte distance between records.
 */
#define vk_foreach_multi_draw_indexed(_draw, _i, _pDrawInfo, _num_draws, _stride) \
   for (const VkMultiDrawIndexedInfoEXT *_draw = (const VkMultiDrawIndexedInfoEXT*)(_pDrawInfo); \
        (_i) < (_num_draws); \
        (_i)++, (_draw) = (const VkMultiDrawIndexedInfoEXT*)((const uint8_t*)(_draw) + (_stride)))

/* Non-indexed variant of vk_foreach_multi_draw_indexed(); same caller
 * obligations for `_i` and `_stride`.
 */
#define vk_foreach_multi_draw(_draw, _i, _pDrawInfo, _num_draws, _stride) \
   for (const VkMultiDrawInfoEXT *_draw = (const VkMultiDrawInfoEXT*)(_pDrawInfo); \
        (_i) < (_num_draws); \
        (_i)++, (_draw) = (const VkMultiDrawInfoEXT*)((const uint8_t*)(_draw) + (_stride)))
337 
/* Number of elements STACK_ARRAY keeps inline on the stack before
 * falling back to the heap.
 */
#define STACK_ARRAY_SIZE 8

/* Zero-initializer spelling differs: {} in C++, {0} in (pre-C23) C. */
#ifdef __cplusplus
#define STACK_ARRAY_ZERO_INIT {}
#else
#define STACK_ARRAY_ZERO_INIT {0}
#endif

/* Declare `name`, pointing at `size` elements of `type`: stack storage
 * when size <= STACK_ARRAY_SIZE, malloc otherwise.  Must be paired with
 * STACK_ARRAY_FINISH(name).  NOTE(review): the malloc result is not
 * checked here — callers appear responsible for handling a NULL `name`.
 */
#define STACK_ARRAY(type, name, size) \
   type _stack_##name[STACK_ARRAY_SIZE] = STACK_ARRAY_ZERO_INIT; \
   type *const name = \
     ((size) <= STACK_ARRAY_SIZE ? _stack_##name : (type *)malloc((size) * sizeof(type)))

/* Release the heap allocation from STACK_ARRAY, if one was made. */
#define STACK_ARRAY_FINISH(name) \
   if (name != _stack_##name) free(name)
353 
/* Size in bytes of a single index of the given VkIndexType; 0 for
 * VK_INDEX_TYPE_NONE_KHR (no index buffer).
 */
static inline uint8_t
vk_index_type_to_bytes(enum VkIndexType type)
{
   switch (type) {
   case VK_INDEX_TYPE_NONE_KHR:  return 0;
   case VK_INDEX_TYPE_UINT8_KHR: return 1;
   case VK_INDEX_TYPE_UINT16:    return 2;
   case VK_INDEX_TYPE_UINT32:    return 4;
   default:                      unreachable("Invalid index type");
   }
}
365 
/* Primitive-restart index value for the given VkIndexType: all bits set
 * within the index width.  VK_INDEX_TYPE_NONE_KHR has no restart value
 * and is rejected.
 */
static inline uint32_t
vk_index_to_restart(enum VkIndexType type)
{
   switch (type) {
   case VK_INDEX_TYPE_UINT8_KHR: return 0xff;
   case VK_INDEX_TYPE_UINT16:    return 0xffff;
   case VK_INDEX_TYPE_UINT32:    return 0xffffffff;
   default:                      unreachable("unexpected index type");
   }
}
376 
377 #ifdef __cplusplus
378 }
379 #endif
380 
381 #endif /* VK_UTIL_H */
382