/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
23 #ifndef VK_UTIL_H
24 #define VK_UTIL_H
25
26 #include "util/bitscan.h"
27 #include "util/macros.h"
28 #include "compiler/shader_enums.h"
29 #include <stdlib.h>
30 #include <string.h>
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
36 /* common inlines and macros for vulkan drivers */
37
38 #include <vulkan/vulkan.h>
39
40 #define vk_foreach_struct(__iter, __start) \
41 for (struct VkBaseOutStructure *__iter = (struct VkBaseOutStructure *)(__start); \
42 __iter; __iter = __iter->pNext)
43
44 #define vk_foreach_struct_const(__iter, __start) \
45 for (const struct VkBaseInStructure *__iter = (const struct VkBaseInStructure *)(__start); \
46 __iter; __iter = __iter->pNext)
47
48 /**
49 * A wrapper for a Vulkan output array. A Vulkan output array is one that
50 * follows the convention of the parameters to
51 * vkGetPhysicalDeviceQueueFamilyProperties().
52 *
53 * Example Usage:
54 *
55 * VkResult
56 * vkGetPhysicalDeviceQueueFamilyProperties(
57 * VkPhysicalDevice physicalDevice,
58 * uint32_t* pQueueFamilyPropertyCount,
59 * VkQueueFamilyProperties* pQueueFamilyProperties)
60 * {
61 * VK_OUTARRAY_MAKE(props, pQueueFamilyProperties,
62 * pQueueFamilyPropertyCount);
63 *
64 * vk_outarray_append(&props, p) {
65 * p->queueFlags = ...;
66 * p->queueCount = ...;
67 * }
68 *
69 * vk_outarray_append(&props, p) {
70 * p->queueFlags = ...;
71 * p->queueCount = ...;
72 * }
73 *
74 * return vk_outarray_status(&props);
75 * }
76 */
77 struct __vk_outarray {
78 /** May be null. */
79 void *data;
80
81 /**
82 * Capacity, in number of elements. Capacity is unlimited (UINT32_MAX) if
83 * data is null.
84 */
85 uint32_t cap;
86
87 /**
88 * Count of elements successfully written to the array. Every write is
89 * considered successful if data is null.
90 */
91 uint32_t *filled_len;
92
93 /**
94 * Count of elements that would have been written to the array if its
95 * capacity were sufficient. Vulkan functions often return VK_INCOMPLETE
96 * when `*filled_len < wanted_len`.
97 */
98 uint32_t wanted_len;
99 };
100
101 static inline void
__vk_outarray_init(struct __vk_outarray * a,void * data,uint32_t * restrict len)102 __vk_outarray_init(struct __vk_outarray *a,
103 void *data, uint32_t *restrict len)
104 {
105 a->data = data;
106 a->cap = *len;
107 a->filled_len = len;
108 *a->filled_len = 0;
109 a->wanted_len = 0;
110
111 if (a->data == NULL)
112 a->cap = UINT32_MAX;
113 }
114
115 static inline VkResult
__vk_outarray_status(const struct __vk_outarray * a)116 __vk_outarray_status(const struct __vk_outarray *a)
117 {
118 if (*a->filled_len < a->wanted_len)
119 return VK_INCOMPLETE;
120 else
121 return VK_SUCCESS;
122 }
123
124 static inline void *
__vk_outarray_next(struct __vk_outarray * a,size_t elem_size)125 __vk_outarray_next(struct __vk_outarray *a, size_t elem_size)
126 {
127 void *p = NULL;
128
129 a->wanted_len += 1;
130
131 if (*a->filled_len >= a->cap)
132 return NULL;
133
134 if (a->data != NULL)
135 p = (uint8_t *)a->data + (*a->filled_len) * elem_size;
136
137 *a->filled_len += 1;
138
139 return p;
140 }
141
142 #define vk_outarray(elem_t) \
143 struct { \
144 struct __vk_outarray base; \
145 elem_t meta[]; \
146 }
147
148 #define vk_outarray_typeof_elem(a) __typeof__((a)->meta[0])
149 #define vk_outarray_sizeof_elem(a) sizeof((a)->meta[0])
150
151 #define vk_outarray_init(a, data, len) \
152 __vk_outarray_init(&(a)->base, (data), (len))
153
154 #define VK_OUTARRAY_MAKE(name, data, len) \
155 VK_OUTARRAY_MAKE_TYPED(__typeof__((data)[0]), name, data, len)
156 #define VK_OUTARRAY_MAKE_TYPED(type, name, data, len) \
157 vk_outarray(type) name; \
158 vk_outarray_init(&name, (data), (len))
159
160 #define vk_outarray_status(a) \
161 __vk_outarray_status(&(a)->base)
162
163 #define vk_outarray_next(a) \
164 vk_outarray_next_typed(vk_outarray_typeof_elem(a), a)
165 #define vk_outarray_next_typed(type, a) \
166 ((type *) \
167 __vk_outarray_next(&(a)->base, vk_outarray_sizeof_elem(a)))
168
169 /**
170 * Append to a Vulkan output array.
171 *
172 * This is a block-based macro. For example:
173 *
174 * vk_outarray_append(&a, elem) {
175 * elem->foo = ...;
176 * elem->bar = ...;
177 * }
178 *
179 * The array `a` has type `vk_outarray(elem_t) *`. It is usually declared with
180 * VK_OUTARRAY_MAKE(). The variable `elem` is block-scoped and has type
181 * `elem_t *`.
182 *
183 * The macro unconditionally increments the array's `wanted_len`. If the array
184 * is not full, then the macro also increment its `filled_len` and then
185 * executes the block. When the block is executed, `elem` is non-null and
186 * points to the newly appended element.
187 */
188 #define vk_outarray_append(a, elem) \
189 vk_outarray_append_typed(vk_outarray_typeof_elem(a), a, elem)
190 #define vk_outarray_append_typed(type, a, elem) \
191 for (type *elem = vk_outarray_next_typed(type, a); \
192 elem != NULL; elem = NULL)
193
194 static inline void *
__vk_find_struct(void * start,VkStructureType sType)195 __vk_find_struct(void *start, VkStructureType sType)
196 {
197 vk_foreach_struct(s, start) {
198 if (s->sType == sType)
199 return s;
200 }
201
202 return NULL;
203 }
204
205 #define vk_find_struct(__start, __sType) \
206 __vk_find_struct((__start), VK_STRUCTURE_TYPE_##__sType)
207
208 #define vk_find_struct_const(__start, __sType) \
209 (const void *)__vk_find_struct((void *)(__start), VK_STRUCTURE_TYPE_##__sType)
210
211 static inline void
__vk_append_struct(void * start,void * element)212 __vk_append_struct(void *start, void *element)
213 {
214 vk_foreach_struct(s, start) {
215 if (s->pNext)
216 continue;
217
218 s->pNext = (struct VkBaseOutStructure *) element;
219 break;
220 }
221 }
222
223 uint32_t vk_get_driver_version(void);
224
225 uint32_t vk_get_version_override(void);
226
227 void vk_warn_non_conformant_implementation(const char *driver_name);
228
229 struct vk_pipeline_cache_header {
230 uint32_t header_size;
231 uint32_t header_version;
232 uint32_t vendor_id;
233 uint32_t device_id;
234 uint8_t uuid[VK_UUID_SIZE];
235 };
236
237 #define VK_EXT_OFFSET (1000000000UL)
238 #define VK_ENUM_EXTENSION(__enum) \
239 ((__enum) >= VK_EXT_OFFSET ? ((((__enum) - VK_EXT_OFFSET) / 1000UL) + 1) : 0)
240 #define VK_ENUM_OFFSET(__enum) \
241 ((__enum) >= VK_EXT_OFFSET ? ((__enum) % 1000) : (__enum))
242
243 #define typed_memcpy(dest, src, count) do { \
244 STATIC_ASSERT(sizeof(*(src)) == sizeof(*(dest))); \
245 memcpy((dest), (src), (count) * sizeof(*(src))); \
246 } while (0)
247
248 static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)249 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
250 {
251 assert(util_bitcount((uint32_t) vk_stage) == 1);
252 return (gl_shader_stage) (ffs((uint32_t) vk_stage) - 1);
253 }
254
255 static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)256 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
257 {
258 return (VkShaderStageFlagBits) (1 << ((uint32_t) mesa_stage));
259 }
260
261 /* iterate over a sequence of indexed multidraws for VK_EXT_multi_draw extension */
262 /* 'i' must be explicitly declared */
263 #define vk_foreach_multi_draw_indexed(_draw, _i, _pDrawInfo, _num_draws, _stride) \
264 for (const VkMultiDrawIndexedInfoEXT *_draw = (const void*)(_pDrawInfo); \
265 (_i) < (_num_draws); \
266 (_i)++, (_draw) = (const VkMultiDrawIndexedInfoEXT*)((const uint8_t*)(_draw) + (_stride)))
267
268 /* iterate over a sequence of multidraws for VK_EXT_multi_draw extension */
269 /* 'i' must be explicitly declared */
270 #define vk_foreach_multi_draw(_draw, _i, _pDrawInfo, _num_draws, _stride) \
271 for (const VkMultiDrawInfoEXT *_draw = (const void*)(_pDrawInfo); \
272 (_i) < (_num_draws); \
273 (_i)++, (_draw) = (const VkMultiDrawInfoEXT*)((const uint8_t*)(_draw) + (_stride)))
274
275
276 struct nir_spirv_specialization;
277
278 struct nir_spirv_specialization*
279 vk_spec_info_to_nir_spirv(const VkSpecializationInfo *spec_info,
280 uint32_t *out_num_spec_entries);
281
282 #define STACK_ARRAY_SIZE 8
283
284 #define STACK_ARRAY(type, name, size) \
285 type _stack_##name[STACK_ARRAY_SIZE], *const name = \
286 (size) <= STACK_ARRAY_SIZE ? _stack_##name : malloc((size) * sizeof(type))
287
288 #define STACK_ARRAY_FINISH(name) \
289 if (name != _stack_##name) free(name)
290
291 #ifdef __cplusplus
292 }
293 #endif
294
295 #endif /* VK_UTIL_H */
296