/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "lvp_private.h"

#include "pipe-loader/pipe_loader.h"
#include "git_sha1.h"
#include "vk_util.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "frontend/drisw_api.h"

#include "compiler/glsl_types.h"
#include "util/u_inlines.h"
#include "util/os_memory.h"
#include "util/u_thread.h"
#include "util/u_atomic.h"
#include "util/timespec.h"
#include "os_time.h"
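
/* Physical device setup: create a software pipe_screen through the
 * pipe-loader, cache the fragment-shader image limit, derive the supported
 * extension list, and bring up WSI.  The screen is the only resource that
 * needs cleanup on failure.
 */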
static VkResult
lvp_physical_device_init(struct lvp_physical_device *device,
                         struct lvp_instance *instance,
                         struct pipe_loader_device *pld)
{
   VkResult result;
   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   device->pld = pld;

   device->pscreen = pipe_loader_create_screen(device->pld);
   if (!device->pscreen)
      return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->max_images = device->pscreen->get_shader_param(device->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_IMAGES);
   lvp_physical_device_get_supported_extensions(device, &device->supported_extensions);
   result = lvp_init_wsi(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;
fail:
   /* The screen was created above, so don't leak it on WSI failure. */
   device->pscreen->destroy(device->pscreen);
   return result;
}

static void
lvp_physical_device_finish(struct lvp_physical_device *device)
{
   lvp_finish_wsi(device);
   device->pscreen->destroy(device->pscreen);
}
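
/* Fallback VkAllocationCallbacks used whenever the application does not
 * supply its own allocator.  Note that the reallocation callback ignores
 * the alignment argument and falls back to plain realloc().
 */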
static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return os_malloc_aligned(size, align);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   os_free_aligned(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
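
/* vkCreateInstance: record the application's requested API version,
 * validate the enabled extension list, and fill in the instance,
 * physical-device and device dispatch tables.  Entrypoints belonging to
 * extensions that were not enabled stay NULL so they are never advertised,
 * unless the LVP_DEBUG_ALL_ENTRYPOINTS debug flag bypasses the filtering.
 */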
VkResult lvp_CreateInstance(
   const VkInstanceCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkInstance* pInstance)
{
   struct lvp_instance *instance;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      client_version = VK_API_VERSION_1_0;
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->apiVersion = client_version;
   instance->physicalDeviceCount = -1;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < LVP_INSTANCE_EXTENSION_COUNT; idx++) {
         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                     lvp_instance_extensions[idx].extensionName))
            break;
      }

      if (idx >= LVP_INSTANCE_EXTENSION_COUNT ||
          !lvp_instance_extensions_supported.extensions[idx]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(NULL, VK_ERROR_EXTENSION_NOT_PRESENT);
      }
      instance->enabled_extensions.extensions[idx] = true;
   }

   bool unchecked = instance->debug_flags & LVP_DEBUG_ALL_ENTRYPOINTS;
   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have
       * not been enabled must not be advertised.
       */
      if (!unchecked &&
          !lvp_instance_entrypoint_is_enabled(i, instance->apiVersion,
                                              &instance->enabled_extensions)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else {
         instance->dispatch.entrypoints[i] =
            lvp_instance_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->physical_device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have
       * not been enabled must not be advertised.
       */
      if (!unchecked &&
          !lvp_physical_device_entrypoint_is_enabled(i, instance->apiVersion,
                                                     &instance->enabled_extensions)) {
         instance->physical_device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->physical_device_dispatch.entrypoints[i] =
            lvp_physical_device_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have
       * not been enabled must not be advertised.
       */
      if (!unchecked &&
          !lvp_device_entrypoint_is_enabled(i, instance->apiVersion,
                                            &instance->enabled_extensions, NULL)) {
         instance->device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->device_dispatch.entrypoints[i] =
            lvp_device_dispatch_table.entrypoints[i];
      }
   }

   // _mesa_locale_init();
   glsl_type_singleton_init_or_ref();
   // VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = lvp_instance_to_handle(instance);

   return VK_SUCCESS;
}

void lvp_DestroyInstance(
   VkInstance _instance,
   const VkAllocationCallbacks* pAllocator)
{
   LVP_FROM_HANDLE(lvp_instance, instance, _instance);

   if (!instance)
      return;
   glsl_type_singleton_decref();
   if (instance->physicalDeviceCount > 0)
      lvp_physical_device_finish(&instance->physicalDevice);
   // _mesa_locale_fini();

   pipe_loader_release(&instance->devs, instance->num_devices);

   vk_object_base_finish(&instance->base);
   vk_free(&instance->alloc, instance);
}
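
/* Software winsys callbacks handed to the DRI software pipe-loader.
 * lavapipe presents through its own WSI code, so get_image is a no-op and
 * the put_image hooks just log; they only exist to satisfy the
 * drisw_loader_funcs interface.
 */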
static void lvp_get_image(struct dri_drawable *dri_drawable,
                          int x, int y, unsigned width, unsigned height, unsigned stride,
                          void *data)
{

}

static void lvp_put_image(struct dri_drawable *dri_drawable,
                          void *data, unsigned width, unsigned height)
{
   fprintf(stderr, "put image %dx%d\n", width, height);
}

static void lvp_put_image2(struct dri_drawable *dri_drawable,
                           void *data, int x, int y, unsigned width, unsigned height,
                           unsigned stride)
{
   fprintf(stderr, "put image 2 %d,%d %dx%d\n", x, y, width, height);
}

static struct drisw_loader_funcs lvp_sw_lf = {
   .get_image = lvp_get_image,
   .put_image = lvp_put_image,
   .put_image2 = lvp_put_image2,
};
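
/* Physical device enumeration probes the single software device lazily on
 * first call: physicalDeviceCount starts at -1 and settles at 0 or 1 once
 * lvp_physical_device_init() has run.  The usual two-call count/fill
 * protocol follows.
 */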
VkResult lvp_EnumeratePhysicalDevices(
   VkInstance _instance,
   uint32_t* pPhysicalDeviceCount,
   VkPhysicalDevice* pPhysicalDevices)
{
   LVP_FROM_HANDLE(lvp_instance, instance, _instance);
   VkResult result;

   if (instance->physicalDeviceCount < 0) {

      /* sw only for now */
      instance->num_devices = pipe_loader_sw_probe(NULL, 0);

      assert(instance->num_devices == 1);

      pipe_loader_sw_probe_dri(&instance->devs, &lvp_sw_lf);

      result = lvp_physical_device_init(&instance->physicalDevice,
                                        instance, &instance->devs[0]);
      if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
         instance->physicalDeviceCount = 0;
      } else if (result == VK_SUCCESS) {
         instance->physicalDeviceCount = 1;
      } else {
         return result;
      }
   }

   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] = lvp_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}
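
/* Core Vulkan 1.0 features.  Anything the software rasterizer always
 * handles is hardwired to true; the rest is derived from PIPE_CAP /
 * PIPE_SHADER_CAP queries on the screen.
 */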
void lvp_GetPhysicalDeviceFeatures(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceFeatures* pFeatures)
{
   LVP_FROM_HANDLE(lvp_physical_device, pdevice, physicalDevice);
   bool indirect = false; //pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_GLSL_FEATURE_LEVEL) >= 400;
   memset(pFeatures, 0, sizeof(*pFeatures));
   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CUBE_MAP_ARRAY) != 0),
      .independentBlend = true,
      .geometryShader = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_GEOMETRY, PIPE_SHADER_CAP_MAX_INSTRUCTIONS) != 0),
      .tessellationShader = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_TESS_EVAL, PIPE_SHADER_CAP_MAX_INSTRUCTIONS) != 0),
      .sampleRateShading = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_SAMPLE_SHADING) != 0),
      .dualSrcBlend = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS) != 0),
      .logicOp = true,
      .multiDrawIndirect = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MULTI_DRAW_INDIRECT) != 0),
      .drawIndirectFirstInstance = true,
      .depthClamp = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DEPTH_CLIP_DISABLE) != 0),
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DEPTH_BOUNDS_TEST) != 0),
      .wideLines = false,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = true,
      .samplerAnisotropy = false, /* FINISHME */
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = true,
      .vertexPipelineStoresAndAtomics = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS) != 0),
      .fragmentStoresAndAtomics = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS) != 0),
      .shaderTessellationAndGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_TEXTURE_MULTISAMPLE) != 0),
      .shaderUniformBufferArrayDynamicIndexing = indirect,
      .shaderSampledImageArrayDynamicIndexing = indirect,
      .shaderStorageBufferArrayDynamicIndexing = indirect,
      .shaderStorageImageArrayDynamicIndexing = indirect,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderClipDistance = true,
      .shaderCullDistance = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CULL_DISTANCE) == 1),
      .shaderFloat64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DOUBLES) == 1),
      .shaderInt64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_INT64) == 1),
      .shaderInt16 = true,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void lvp_GetPhysicalDeviceFeatures2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceFeatures2 *pFeatures)
{
   lvp_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);

   vk_foreach_struct(ext, pFeatures->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
         features->variablePointers = true;
         features->variablePointersStorageBuffer = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures*)ext;
         features->storageBuffer16BitAccess = true;
         features->uniformAndStorageBuffer16BitAccess = true;
         features->storagePushConstant16 = true;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
         VkPhysicalDevicePrivateDataFeaturesEXT *features =
            (VkPhysicalDevicePrivateDataFeaturesEXT *)ext;
         features->privateData = true;
         break;
      }
      default:
         break;
      }
   }
}
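
/* The pipeline cache UUID only needs to change when the driver build does,
 * so it is derived from the Mesa git SHA.
 */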
void
lvp_device_get_cache_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "val-%s", MESA_GIT_SHA1 + 4);
}

void lvp_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                                     VkPhysicalDeviceProperties *pProperties)
{
   LVP_FROM_HANDLE(lvp_physical_device, pdevice, physicalDevice);

   VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT;

   uint64_t grid_size[3], block_size[3];
   uint64_t max_threads_per_block, max_local_size;

   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
                                       PIPE_COMPUTE_CAP_MAX_GRID_SIZE, grid_size);
   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
                                       PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE, block_size);
   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
                                       PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
                                       &max_threads_per_block);
   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
                                       PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE,
                                       &max_local_size);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
      .maxImageDimension2D = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
      .maxImageDimension3D = (1 << pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_3D_LEVELS)),
      .maxImageDimensionCube = (1 << pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS)),
      .maxImageArrayLayers = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE),
      .maxStorageBufferRange = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_SHADER_BUFFER_SIZE),
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = 4096,
      .maxSamplerAllocationCount = 32 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = 32,
      .maxPerStageDescriptorUniformBuffers = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_CONST_BUFFERS),
      .maxPerStageDescriptorStorageBuffers = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS),
      .maxPerStageDescriptorSampledImages = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS),
      .maxPerStageDescriptorStorageImages = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_IMAGES - 8),
      .maxPerStageDescriptorInputAttachments = 8,
      .maxPerStageResources = 128,
      .maxDescriptorSetSamplers = 32 * 1024,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetUniformBuffersDynamic = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetStorageBuffersDynamic = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxDescriptorSetInputAttachments = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 128,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GS_INVOCATIONS),
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES),
      .maxGeometryTotalOutputComponents = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS),
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = max_local_size,
      .maxComputeWorkGroupCount = { grid_size[0], grid_size[1], grid_size[2] },
      .maxComputeWorkGroupInvocations = max_threads_per_block,
      .maxComputeWorkGroupSize = { block_size[0], block_size[1], block_size[2] },
      .subPixelPrecisionBits = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_RASTERIZER_SUBPIXEL_BITS),
      .subTexelPrecisionBits = 8,
      .mipmapPrecisionBits = 8,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_VIEWPORTS),
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { -32768.0, 32768.0 },
      .viewportSubPixelBits = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_VIEWPORT_SUBPIXEL_BITS),
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT),
      .minUniformBufferOffsetAlignment = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT),
      .minStorageBufferOffsetAlignment = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT),
      .minTexelOffset = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MIN_TEXEL_OFFSET),
      .maxTexelOffset = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXEL_OFFSET),
      .minTexelGatherOffset = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET),
      .maxTexelGatherOffset = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET),
      .minInterpolationOffset = -2, /* FIXME */
      .maxInterpolationOffset = 2, /* FIXME */
      .subPixelInterpolationOffsetBits = 8, /* FIXME */
      .maxFramebufferWidth = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
      .maxFramebufferHeight = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
      .maxFramebufferLayers = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_RENDER_TARGETS),
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = sample_counts,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = sample_counts,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 2,
      .pointSizeRange = { 0.0, pdevice->pscreen->get_paramf(pdevice->pscreen, PIPE_CAPF_MAX_POINT_WIDTH) },
      .lineWidthRange = { 1.0, 1.0 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = 0.0,
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(1, 0, 2),
      .driverVersion = 1,
      .vendorID = VK_VENDOR_ID_MESA,
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_CPU,
      .limits = limits,
      .sparseProperties = {0},
   };

   strcpy(pProperties->deviceName, pdevice->pscreen->get_name(pdevice->pscreen));
   lvp_device_get_cache_uuid(pProperties->pipelineCacheUUID);
}

void lvp_GetPhysicalDeviceProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceProperties2 *pProperties)
{
   lvp_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties*)ext;
         properties->maxPerSetDescriptors = 1024;
         properties->maxMemoryAllocationSize = (1u << 31);
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
         VkPhysicalDeviceDriverPropertiesKHR *driver_props =
            (VkPhysicalDeviceDriverPropertiesKHR *) ext;
         driver_props->driverID = VK_DRIVER_ID_MESA_LLVMPIPE;
         snprintf(driver_props->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR, "llvmpipe");
         snprintf(driver_props->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
                  "Mesa " PACKAGE_VERSION MESA_GIT_SHA1
#ifdef MESA_LLVM_VERSION_STRING
                  " (LLVM " MESA_LLVM_VERSION_STRING ")"
#endif
                 );
         driver_props->conformanceVersion.major = 1;
         driver_props->conformanceVersion.minor = 0;
         driver_props->conformanceVersion.subminor = 0;
         driver_props->conformanceVersion.patch = 0;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties*)ext;
         properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      default:
         break;
      }
   }
}
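
/* lavapipe exposes exactly one queue family: a single queue with graphics,
 * compute and transfer capability.  Both the 1.0 and the "2" query report
 * the same properties.
 */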
static void lvp_get_physical_device_queue_family_properties(
   VkQueueFamilyProperties* pQueueFamilyProperties)
{
   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
                    VK_QUEUE_TRANSFER_BIT,
      .queueCount = 1,
      .timestampValidBits = 64,
      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
   };
}

void lvp_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t* pCount,
   VkQueueFamilyProperties* pQueueFamilyProperties)
{
   if (pQueueFamilyProperties == NULL) {
      *pCount = 1;
      return;
   }

   assert(*pCount >= 1);
   lvp_get_physical_device_queue_family_properties(pQueueFamilyProperties);
}

void lvp_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t* pCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   if (pQueueFamilyProperties == NULL) {
      *pCount = 1;
      return;
   }

   assert(*pCount >= 1);
   lvp_get_physical_device_queue_family_properties(&pQueueFamilyProperties->queueFamilyProperties);
}
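
/* One memory type on a single 2 GiB heap.  Everything is host memory, so
 * the one type is simultaneously device-local, host-visible, coherent and
 * cached.
 */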
void lvp_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties* pMemoryProperties)
{
   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
      .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                       VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                       VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
      .heapIndex = 0,
   };

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .size = 2ULL*1024*1024*1024,
      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
   };
}

void lvp_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
   lvp_GetPhysicalDeviceMemoryProperties(physicalDevice,
                                         &pMemoryProperties->memoryProperties);
}

PFN_vkVoidFunction lvp_GetInstanceProcAddr(
   VkInstance _instance,
   const char* pName)
{
   LVP_FROM_HANDLE(lvp_instance, instance, _instance);

   /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
    * when we have to return valid function pointers, NULL, or it's left
    * undefined. See the table for exact details.
    */
   if (pName == NULL)
      return NULL;

#define LOOKUP_LVP_ENTRYPOINT(entrypoint)               \
   if (strcmp(pName, "vk" #entrypoint) == 0)            \
      return (PFN_vkVoidFunction)lvp_##entrypoint

   LOOKUP_LVP_ENTRYPOINT(EnumerateInstanceExtensionProperties);
   LOOKUP_LVP_ENTRYPOINT(EnumerateInstanceLayerProperties);
   LOOKUP_LVP_ENTRYPOINT(EnumerateInstanceVersion);
   LOOKUP_LVP_ENTRYPOINT(CreateInstance);

   /* GetInstanceProcAddr() can also be called with a NULL instance.
    * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
    */
   LOOKUP_LVP_ENTRYPOINT(GetInstanceProcAddr);

#undef LOOKUP_LVP_ENTRYPOINT

   if (instance == NULL)
      return NULL;

   int idx = lvp_get_instance_entrypoint_index(pName);
   if (idx >= 0)
      return instance->dispatch.entrypoints[idx];

   idx = lvp_get_physical_device_entrypoint_index(pName);
   if (idx >= 0)
      return instance->physical_device_dispatch.entrypoints[idx];

   idx = lvp_get_device_entrypoint_index(pName);
   if (idx >= 0)
      return instance->device_dispatch.entrypoints[idx];

   return NULL;
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
   VkInstance instance,
   const char* pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
   VkInstance instance,
   const char* pName)
{
   return lvp_GetInstanceProcAddr(instance, pName);
}

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
   VkInstance _instance,
   const char* pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
   VkInstance _instance,
   const char* pName)
{
   LVP_FROM_HANDLE(lvp_instance, instance, _instance);

   if (!pName || !instance)
      return NULL;

   int idx = lvp_get_physical_device_entrypoint_index(pName);
   if (idx < 0)
      return NULL;

   return instance->physical_device_dispatch.entrypoints[idx];
}

PFN_vkVoidFunction lvp_GetDeviceProcAddr(
   VkDevice _device,
   const char* pName)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   if (!device || !pName)
      return NULL;

   int idx = lvp_get_device_entrypoint_index(pName);
   if (idx < 0)
      return NULL;

   return device->dispatch.entrypoints[idx];
}
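
/* Queue worker thread: sleep on the condition variable until a task is
 * queued, drop the lock while executing the task's command buffers, then
 * retake it to unlink and free the task.  queue->count tracks tasks still
 * in flight; the wait-idle paths poll it.
 */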
static int queue_thread(void *data)
{
   struct lvp_queue *queue = data;

   mtx_lock(&queue->m);
   while (!queue->shutdown) {
      struct lvp_queue_work *task;
      while (list_is_empty(&queue->workqueue) && !queue->shutdown)
         cnd_wait(&queue->new_work, &queue->m);

      if (queue->shutdown)
         break;

      task = list_first_entry(&queue->workqueue, struct lvp_queue_work,
                              list);

      mtx_unlock(&queue->m);
      /* execute */
      for (unsigned i = 0; i < task->cmd_buffer_count; i++) {
         lvp_execute_cmds(queue->device, queue, task->fence, task->cmd_buffers[i]);
      }
      if (!task->cmd_buffer_count && task->fence)
         task->fence->signaled = true;
      p_atomic_dec(&queue->count);
      mtx_lock(&queue->m);
      list_del(&task->list);
      free(task);
   }
   mtx_unlock(&queue->m);
   return 0;
}

static VkResult
lvp_queue_init(struct lvp_device *device, struct lvp_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;

   queue->flags = 0;
   queue->ctx = device->pscreen->context_create(device->pscreen, NULL, PIPE_CONTEXT_ROBUST_BUFFER_ACCESS);
   list_inithead(&queue->workqueue);
   p_atomic_set(&queue->count, 0);
   mtx_init(&queue->m, mtx_plain);
   queue->exec_thread = u_thread_create(queue_thread, queue);

   return VK_SUCCESS;
}

static void
lvp_queue_finish(struct lvp_queue *queue)
{
   mtx_lock(&queue->m);
   queue->shutdown = true;
   cnd_broadcast(&queue->new_work);
   mtx_unlock(&queue->m);

   thrd_join(queue->exec_thread, NULL);

   cnd_destroy(&queue->new_work);
   mtx_destroy(&queue->m);
   queue->ctx->destroy(queue->ctx);
}

static int lvp_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < LVP_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, lvp_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

static void
lvp_device_init_dispatch(struct lvp_device *device)
{
   const struct lvp_instance *instance = device->physical_device->instance;
   const struct lvp_device_dispatch_table *dispatch_table_layer = NULL;
   bool unchecked = instance->debug_flags & LVP_DEBUG_ALL_ENTRYPOINTS;

   for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!unchecked &&
          !lvp_device_entrypoint_is_enabled(i, instance->apiVersion,
                                            &instance->enabled_extensions,
                                            &device->enabled_extensions)) {
         device->dispatch.entrypoints[i] = NULL;
      } else if (dispatch_table_layer &&
                 dispatch_table_layer->entrypoints[i]) {
         device->dispatch.entrypoints[i] =
            dispatch_table_layer->entrypoints[i];
      } else {
         device->dispatch.entrypoints[i] =
            lvp_device_dispatch_table.entrypoints[i];
      }
   }
}
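
/* vkCreateDevice: check the requested features and extensions against what
 * the physical device advertises, then build the device dispatch table and
 * start the queue's gallium context and worker thread.
 */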
VkResult lvp_CreateDevice(
   VkPhysicalDevice physicalDevice,
   const VkDeviceCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkDevice* pDevice)
{
   fprintf(stderr, "WARNING: lavapipe is not a conformant vulkan implementation, testing use only.\n");

   LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
   struct lvp_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      lvp_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_device_init(&device->vk, pCreateInfo,
                  &physical_device->instance->alloc, pAllocator);

   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = lvp_get_device_extension_index(ext_name);
      if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->vk.alloc, device);
         return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }
   lvp_device_init_dispatch(device);

   mtx_init(&device->fence_lock, mtx_plain);
   device->pscreen = physical_device->pscreen;

   lvp_queue_init(device, &device->queue);

   *pDevice = lvp_device_to_handle(device);

   return VK_SUCCESS;
}

void lvp_DestroyDevice(
   VkDevice _device,
   const VkAllocationCallbacks* pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);

   lvp_queue_finish(&device->queue);
   vk_free(&device->vk.alloc, device);
}

VkResult lvp_EnumerateInstanceExtensionProperties(
   const char* pLayerName,
   uint32_t* pPropertyCount,
   VkExtensionProperties* pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < LVP_INSTANCE_EXTENSION_COUNT; i++) {
      if (lvp_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = lvp_instance_extensions[i];
         }
      }
   }

   return vk_outarray_status(&out);
}

VkResult lvp_EnumerateDeviceExtensionProperties(
   VkPhysicalDevice physicalDevice,
   const char* pLayerName,
   uint32_t* pPropertyCount,
   VkExtensionProperties* pProperties)
{
   LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < LVP_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = lvp_device_extensions[i];
         }
      }
   }
   return vk_outarray_status(&out);
}

VkResult lvp_EnumerateInstanceLayerProperties(
   uint32_t* pPropertyCount,
   VkLayerProperties* pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult lvp_EnumerateDeviceLayerProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t* pPropertyCount,
   VkLayerProperties* pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}

void lvp_GetDeviceQueue2(
   VkDevice _device,
   const VkDeviceQueueInfo2* pQueueInfo,
   VkQueue* pQueue)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_queue *queue;

   queue = &device->queue;
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = lvp_queue_to_handle(queue);
}

void lvp_GetDeviceQueue(
   VkDevice _device,
   uint32_t queueFamilyIndex,
   uint32_t queueIndex,
   VkQueue* pQueue)
{
   const VkDeviceQueueInfo2 info = (VkDeviceQueueInfo2) {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
      .queueFamilyIndex = queueFamilyIndex,
      .queueIndex = queueIndex
   };

   lvp_GetDeviceQueue2(_device, &info, pQueue);
}
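
/* vkQueueSubmit packages each VkSubmitInfo into one heap allocation holding
 * the task header followed by its command buffer array, then queues it for
 * the worker thread and signals new_work.
 */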
VkResult lvp_QueueSubmit(
   VkQueue _queue,
   uint32_t submitCount,
   const VkSubmitInfo* pSubmits,
   VkFence _fence)
{
   LVP_FROM_HANDLE(lvp_queue, queue, _queue);
   LVP_FROM_HANDLE(lvp_fence, fence, _fence);

   if (submitCount == 0)
      goto just_signal_fence;
   for (uint32_t i = 0; i < submitCount; i++) {
      uint32_t task_size = sizeof(struct lvp_queue_work) + pSubmits[i].commandBufferCount * sizeof(struct lvp_cmd_buffer *);
      struct lvp_queue_work *task = malloc(task_size);
      if (!task)
         return vk_error(queue->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

      task->cmd_buffer_count = pSubmits[i].commandBufferCount;
      task->fence = fence;
      /* The command buffer array lives in the same allocation, right after
       * the task header. */
      task->cmd_buffers = (struct lvp_cmd_buffer **)(task + 1);
      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         task->cmd_buffers[j] = lvp_cmd_buffer_from_handle(pSubmits[i].pCommandBuffers[j]);
      }

      mtx_lock(&queue->m);
      p_atomic_inc(&queue->count);
      list_addtail(&task->list, &queue->workqueue);
      cnd_signal(&queue->new_work);
      mtx_unlock(&queue->m);
   }
   return VK_SUCCESS;
just_signal_fence:
   if (fence)
      fence->signaled = true;
   return VK_SUCCESS;
}
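
/* Idle waits poll the queue's in-flight task count: timeout 0 is a simple
 * peek, UINT64_MAX spins with a sleep, and anything else polls against a
 * CLOCK_MONOTONIC deadline.
 */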
static VkResult queue_wait_idle(struct lvp_queue *queue, uint64_t timeout)
{
   if (timeout == 0)
      return p_atomic_read(&queue->count) == 0 ? VK_SUCCESS : VK_TIMEOUT;
   if (timeout == UINT64_MAX)
      while (p_atomic_read(&queue->count))
         os_time_sleep(100);
   else {
      struct timespec t, current;
      clock_gettime(CLOCK_MONOTONIC, &current);
      timespec_add_nsec(&t, &current, timeout);
      bool timedout = false;
      while (p_atomic_read(&queue->count) && !(timedout = timespec_passed(CLOCK_MONOTONIC, &t)))
         os_time_sleep(10);
      if (timedout)
         return VK_TIMEOUT;
   }
   return VK_SUCCESS;
}

VkResult lvp_QueueWaitIdle(
   VkQueue _queue)
{
   LVP_FROM_HANDLE(lvp_queue, queue, _queue);

   return queue_wait_idle(queue, UINT64_MAX);
}

VkResult lvp_DeviceWaitIdle(
   VkDevice _device)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);

   return queue_wait_idle(&device->queue, UINT64_MAX);
}
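
/* Device memory comes straight from the pipe screen; the pmem handle is
 * what later gets mapped and bound as backing storage for buffers and
 * images.
 */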
VkResult lvp_AllocateMemory(
   VkDevice _device,
   const VkMemoryAllocateInfo* pAllocateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkDeviceMemory* pMem)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_device_memory *mem;
   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &mem->base,
                       VK_OBJECT_TYPE_DEVICE_MEMORY);
   mem->pmem = device->pscreen->allocate_memory(device->pscreen, pAllocateInfo->allocationSize);
   if (!mem->pmem) {
      vk_free2(&device->vk.alloc, pAllocator, mem);
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   mem->type_index = pAllocateInfo->memoryTypeIndex;

   *pMem = lvp_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

void lvp_FreeMemory(
   VkDevice _device,
   VkDeviceMemory _mem,
   const VkAllocationCallbacks* pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   device->pscreen->free_memory(device->pscreen, mem->pmem);
   vk_object_base_finish(&mem->base);
   vk_free2(&device->vk.alloc, pAllocator, mem);
}

VkResult lvp_MapMemory(
   VkDevice _device,
   VkDeviceMemory _memory,
   VkDeviceSize offset,
   VkDeviceSize size,
   VkMemoryMapFlags flags,
   void** ppData)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);
   void *map;
   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   map = device->pscreen->map_memory(device->pscreen, mem->pmem);

   *ppData = (char *)map + offset;
   return VK_SUCCESS;
}

void lvp_UnmapMemory(
   VkDevice _device,
   VkDeviceMemory _memory)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);

   if (mem == NULL)
      return;

   device->pscreen->unmap_memory(device->pscreen, mem->pmem);
}
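
/* The single memory type advertised is host-coherent, so flushing and
 * invalidating mapped ranges are no-ops.
 */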
VkResult lvp_FlushMappedMemoryRanges(
   VkDevice _device,
   uint32_t memoryRangeCount,
   const VkMappedMemoryRange* pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult lvp_InvalidateMappedMemoryRanges(
   VkDevice _device,
   uint32_t memoryRangeCount,
   const VkMappedMemoryRange* pMemoryRanges)
{
   return VK_SUCCESS;
}

void lvp_GetBufferMemoryRequirements(
   VkDevice device,
   VkBuffer _buffer,
   VkMemoryRequirements* pMemoryRequirements)
{
   LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->total_size;
   pMemoryRequirements->alignment = 64;
}

void lvp_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2 *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   lvp_GetBufferMemoryRequirements(device, pInfo->buffer,
                                   &pMemoryRequirements->memoryRequirements);
   vk_foreach_struct(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *req =
            (VkMemoryDedicatedRequirements *) ext;
         req->requiresDedicatedAllocation = false;
         req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
         break;
      }
      default:
         break;
      }
   }
}

void lvp_GetImageMemoryRequirements(
   VkDevice device,
   VkImage _image,
   VkMemoryRequirements* pMemoryRequirements)
{
   LVP_FROM_HANDLE(lvp_image, image, _image);
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void lvp_GetImageMemoryRequirements2(
   VkDevice device,
   const VkImageMemoryRequirementsInfo2 *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   lvp_GetImageMemoryRequirements(device, pInfo->image,
                                  &pMemoryRequirements->memoryRequirements);

   vk_foreach_struct(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *req =
            (VkMemoryDedicatedRequirements *) ext;
         req->requiresDedicatedAllocation = false;
         req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
         break;
      }
      default:
         break;
      }
   }
}

void lvp_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t* pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
{
   stub();
}

void lvp_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2* pInfo,
   uint32_t* pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
{
   stub();
}

void lvp_GetDeviceMemoryCommitment(
   VkDevice device,
   VkDeviceMemory memory,
   VkDeviceSize* pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult lvp_BindBufferMemory2(VkDevice _device,
                               uint32_t bindInfoCount,
                               const VkBindBufferMemoryInfo *pBindInfos)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      LVP_FROM_HANDLE(lvp_device_memory, mem, pBindInfos[i].memory);
      LVP_FROM_HANDLE(lvp_buffer, buffer, pBindInfos[i].buffer);

      device->pscreen->resource_bind_backing(device->pscreen,
                                             buffer->bo,
                                             mem->pmem,
                                             pBindInfos[i].memoryOffset);
   }
   return VK_SUCCESS;
}

VkResult lvp_BindBufferMemory(
   VkDevice _device,
   VkBuffer _buffer,
   VkDeviceMemory _memory,
   VkDeviceSize memoryOffset)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);
   LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);

   device->pscreen->resource_bind_backing(device->pscreen,
                                          buffer->bo,
                                          mem->pmem,
                                          memoryOffset);
   return VK_SUCCESS;
}

VkResult lvp_BindImageMemory2(VkDevice _device,
                              uint32_t bindInfoCount,
                              const VkBindImageMemoryInfo *pBindInfos)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      LVP_FROM_HANDLE(lvp_device_memory, mem, pBindInfos[i].memory);
      LVP_FROM_HANDLE(lvp_image, image, pBindInfos[i].image);

      device->pscreen->resource_bind_backing(device->pscreen,
                                             image->bo,
                                             mem->pmem,
                                             pBindInfos[i].memoryOffset);
   }
   return VK_SUCCESS;
}

VkResult lvp_BindImageMemory(
   VkDevice _device,
   VkImage _image,
   VkDeviceMemory _memory,
   VkDeviceSize memoryOffset)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);
   LVP_FROM_HANDLE(lvp_image, image, _image);

   device->pscreen->resource_bind_backing(device->pscreen,
                                          image->bo,
                                          mem->pmem,
                                          memoryOffset);
   return VK_SUCCESS;
}

VkResult lvp_QueueBindSparse(
   VkQueue queue,
   uint32_t bindInfoCount,
   const VkBindSparseInfo* pBindInfo,
   VkFence fence)
{
   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
}
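
/* Fences pair a CPU-side "signaled" flag with an optional pipe fence
 * handle; the handle is filled in once the fence's submission actually
 * executes (via lvp_execute_cmds), so a NULL handle means execution has not
 * gotten there yet.
 */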
VkResult lvp_CreateFence(
   VkDevice _device,
   const VkFenceCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkFence* pFence)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_fence *fence;

   fence = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*fence), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &fence->base, VK_OBJECT_TYPE_FENCE);
   fence->signaled = pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT;

   fence->handle = NULL;
   *pFence = lvp_fence_to_handle(fence);

   return VK_SUCCESS;
}

void lvp_DestroyFence(
   VkDevice _device,
   VkFence _fence,
   const VkAllocationCallbacks* pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_fence, fence, _fence);

   if (!_fence)
      return;
   if (fence->handle)
      device->pscreen->fence_reference(device->pscreen, &fence->handle, NULL);

   vk_object_base_finish(&fence->base);
   vk_free2(&device->vk.alloc, pAllocator, fence);
}

VkResult lvp_ResetFences(
   VkDevice _device,
   uint32_t fenceCount,
   const VkFence* pFences)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   for (unsigned i = 0; i < fenceCount; i++) {
      struct lvp_fence *fence = lvp_fence_from_handle(pFences[i]);

      fence->signaled = false;

      mtx_lock(&device->fence_lock);
      if (fence->handle)
         device->pscreen->fence_reference(device->pscreen, &fence->handle, NULL);
      mtx_unlock(&device->fence_lock);
   }
   return VK_SUCCESS;
}

VkResult lvp_GetFenceStatus(
   VkDevice _device,
   VkFence _fence)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_fence, fence, _fence);

   if (fence->signaled)
      return VK_SUCCESS;

   mtx_lock(&device->fence_lock);

   if (!fence->handle) {
      mtx_unlock(&device->fence_lock);
      return VK_NOT_READY;
   }

   bool signalled = device->pscreen->fence_finish(device->pscreen,
                                                  NULL,
                                                  fence->handle,
                                                  0);
   mtx_unlock(&device->fence_lock);
   if (signalled)
      return VK_SUCCESS;
   else
      return VK_NOT_READY;
}
1463
VkResult lvp_CreateFramebuffer(
   VkDevice                                    _device,
   const VkFramebufferCreateInfo*              pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkFramebuffer*                              pFramebuffer)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
      sizeof(struct lvp_image_view *) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &framebuffer->base,
                       VK_OBJECT_TYPE_FRAMEBUFFER);
   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      framebuffer->attachments[i] = lvp_image_view_from_handle(_iview);
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   *pFramebuffer = lvp_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

void lvp_DestroyFramebuffer(
   VkDevice                                    _device,
   VkFramebuffer                               _fb,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_object_base_finish(&fb->base);
   vk_free2(&device->vk.alloc, pAllocator, fb);
}

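/* Wait strategy: drain the queue first, then wait on each fence's
 * gallium handle.  A fence that never received a handle counts as
 * unsignaled; with waitAll == false, one signaled fence is enough.
 */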
VkResult lvp_WaitForFences(
   VkDevice                                    _device,
   uint32_t                                    fenceCount,
   const VkFence*                              pFences,
   VkBool32                                    waitAll,
   uint64_t                                    timeout)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);

   VkResult qret = queue_wait_idle(&device->queue, timeout);
   bool timeout_status = false;
   if (qret == VK_TIMEOUT)
      return VK_TIMEOUT;

   mtx_lock(&device->fence_lock);
   for (unsigned i = 0; i < fenceCount; i++) {
      struct lvp_fence *fence = lvp_fence_from_handle(pFences[i]);

      if (fence->signaled)
         continue;
      if (!fence->handle) {
         timeout_status = true;
         continue;
      }
      bool ret = device->pscreen->fence_finish(device->pscreen,
                                               NULL,
                                               fence->handle,
                                               timeout);
      if (ret && !waitAll) {
         timeout_status = false;
         break;
      }

      if (!ret)
         timeout_status = true;
   }
   mtx_unlock(&device->fence_lock);
   return timeout_status ? VK_TIMEOUT : VK_SUCCESS;
}

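/* lavapipe executes all work on a single queue in submission order, so
 * semaphores carry no payload; create/destroy only manage the object.
 */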
VkResult lvp_CreateSemaphore(
   VkDevice                                    _device,
   const VkSemaphoreCreateInfo*                pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkSemaphore*                                pSemaphore)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);

   struct lvp_semaphore *sema = vk_alloc2(&device->vk.alloc, pAllocator,
                                          sizeof(*sema), 8,
                                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!sema)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   vk_object_base_init(&device->vk, &sema->base,
                       VK_OBJECT_TYPE_SEMAPHORE);
   *pSemaphore = lvp_semaphore_to_handle(sema);

   return VK_SUCCESS;
}

void lvp_DestroySemaphore(
   VkDevice                                    _device,
   VkSemaphore                                 _semaphore,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_semaphore, semaphore, _semaphore);

   if (!_semaphore)
      return;
   vk_object_base_finish(&semaphore->base);
   vk_free2(&device->vk.alloc, pAllocator, semaphore);
}

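/* Events are backed by a single word of host memory (event_storage);
 * the Set/Reset/GetEventStatus entrypoints below just write or poll it.
 */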
VkResult lvp_CreateEvent(
   VkDevice                                    _device,
   const VkEventCreateInfo*                    pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkEvent*                                    pEvent)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_event *event = vk_alloc2(&device->vk.alloc, pAllocator,
                                       sizeof(*event), 8,
                                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);
   *pEvent = lvp_event_to_handle(event);

   return VK_SUCCESS;
}

void lvp_DestroyEvent(
   VkDevice                                    _device,
   VkEvent                                     _event,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_event, event, _event);

   if (!event)
      return;

   vk_object_base_finish(&event->base);
   vk_free2(&device->vk.alloc, pAllocator, event);
}

VkResult lvp_GetEventStatus(
   VkDevice                                    _device,
   VkEvent                                     _event)
{
   LVP_FROM_HANDLE(lvp_event, event, _event);
   if (event->event_storage == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult lvp_SetEvent(
   VkDevice                                    _device,
   VkEvent                                     _event)
{
   LVP_FROM_HANDLE(lvp_event, event, _event);
   event->event_storage = 1;

   return VK_SUCCESS;
}

VkResult lvp_ResetEvent(
   VkDevice                                    _device,
   VkEvent                                     _event)
{
   LVP_FROM_HANDLE(lvp_event, event, _event);
   event->event_storage = 0;

   return VK_SUCCESS;
}

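/* The sampler object just stashes the full create info for later
 * translation to gallium sampler state.
 */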
VkResult lvp_CreateSampler(
   VkDevice                                    _device,
   const VkSamplerCreateInfo*                  pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkSampler*                                  pSampler)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &sampler->base,
                       VK_OBJECT_TYPE_SAMPLER);
   sampler->create_info = *pCreateInfo;
   *pSampler = lvp_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void lvp_DestroySampler(
   VkDevice                                    _device,
   VkSampler                                   _sampler,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_sampler, sampler, _sampler);

   if (!_sampler)
      return;
   vk_object_base_finish(&sampler->base);
   vk_free2(&device->vk.alloc, pAllocator, sampler);
}

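/* VK_EXT_private_data: thin wrappers around the vk_object helpers
 * provided by the common Vulkan runtime.
 */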
VkResult lvp_CreatePrivateDataSlotEXT(
   VkDevice                                    _device,
   const VkPrivateDataSlotCreateInfoEXT*       pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkPrivateDataSlotEXT*                       pPrivateDataSlot)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   return vk_private_data_slot_create(&device->vk, pCreateInfo, pAllocator,
                                      pPrivateDataSlot);
}

void lvp_DestroyPrivateDataSlotEXT(
   VkDevice                                    _device,
   VkPrivateDataSlotEXT                        privateDataSlot,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
}

VkResult lvp_SetPrivateDataEXT(
   VkDevice                                    _device,
   VkObjectType                                objectType,
   uint64_t                                    objectHandle,
   VkPrivateDataSlotEXT                        privateDataSlot,
   uint64_t                                    data)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   return vk_object_base_set_private_data(&device->vk, objectType,
                                          objectHandle, privateDataSlot,
                                          data);
}

void lvp_GetPrivateDataEXT(
   VkDevice                                    _device,
   VkObjectType                                objectType,
   uint64_t                                    objectHandle,
   VkPrivateDataSlotEXT                        privateDataSlot,
   uint64_t*                                   pData)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   vk_object_base_get_private_data(&device->vk, objectType, objectHandle,
                                   privateDataSlot, pData);
}