• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**************************************************************************
2  *
3  * Copyright (C) 2022 Collabora Ltd
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included
13  * in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  **************************************************************************/
24 
25 #include "vkr_allocator.h"
26 
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 
32 #include "util/list.h"
33 #include "venus-protocol/vulkan.h"
34 #include "virgl_resource.h"
35 
/* Assume that we will deal with at most 4 devices.
 * This avoids per-device dynamic resource allocations.
 * For now, `vkr_allocator` is designed for Mesa CI use, which
 * uses lavapipe as the only Vulkan driver, but the logic allows
 * for more devices to give some leeway and flexibility, especially
 * if this allocator is expanded to use whatever devices are available.
 */
43 #define VKR_ALLOCATOR_MAX_DEVICE_COUNT 4
44 
/* Bookkeeping for one VkDeviceMemory imported from a virgl resource's
 * opaque fd.  Allocated by vkr_allocator_allocate_memory() and linked
 * into vkr_allocator.memories until freed by vkr_allocator_free_memory().
 */
struct vkr_opaque_fd_mem_info {
   VkDevice device;              /* device the memory was allocated on */
   VkDeviceMemory device_memory; /* the imported allocation */
   uint32_t res_id;              /* id of the virgl resource the fd came from */
   uint64_t size;                /* allocation size in bytes */

   struct list_head head;        /* link in vkr_allocator.memories */
};
53 
/* Process-wide allocator singleton.  Static storage, so it starts
 * zero-initialized; vkr_allocator_init()/vkr_allocator_fini() bring it
 * up and tear it down, and fini() memsets it back to zero. */
static struct vkr_allocator {
   VkInstance instance;

   /* parallel arrays: physical device, its logical device, and its UUID
    * (used to match resources to devices); only the first device_count
    * entries are valid */
   VkPhysicalDevice physical_devices[VKR_ALLOCATOR_MAX_DEVICE_COUNT];
   VkDevice devices[VKR_ALLOCATOR_MAX_DEVICE_COUNT];
   uint8_t device_uuids[VKR_ALLOCATOR_MAX_DEVICE_COUNT][VK_UUID_SIZE];
   uint32_t device_count;

   /* list of live vkr_opaque_fd_mem_info allocations */
   struct list_head memories;
} vkr_allocator;

/* Set by vkr_allocator_resource_map() after a successful lazy init;
 * cleared by vkr_allocator_fini(). */
static bool vkr_allocator_initialized;
66 
/* Releases the imported device memory, unlinks mem_info from
 * vkr_allocator.memories, and frees the bookkeeping struct. */
static void
vkr_allocator_free_memory(struct vkr_opaque_fd_mem_info *mem_info)
{
   vkFreeMemory(mem_info->device, mem_info->device_memory, NULL);
   list_del(&mem_info->head);
   free(mem_info);
}
74 
75 static VkDevice
vkr_allocator_get_device(struct virgl_resource * res)76 vkr_allocator_get_device(struct virgl_resource *res)
77 {
78    for (uint32_t i = 0; i < vkr_allocator.device_count; ++i) {
79       if (memcmp(vkr_allocator.device_uuids[i], res->opaque_fd_metadata.device_uuid,
80                  VK_UUID_SIZE) == 0)
81          return vkr_allocator.devices[i];
82    }
83 
84    return VK_NULL_HANDLE;
85 }
86 
87 static struct vkr_opaque_fd_mem_info *
vkr_allocator_allocate_memory(struct virgl_resource * res)88 vkr_allocator_allocate_memory(struct virgl_resource *res)
89 {
90    VkDevice dev_handle = vkr_allocator_get_device(res);
91    if (dev_handle == VK_NULL_HANDLE)
92       return NULL;
93 
94    int fd = -1;
95    if (virgl_resource_export_fd(res, &fd) != VIRGL_RESOURCE_FD_OPAQUE) {
96       if (fd >= 0)
97          close(fd);
98       return NULL;
99    }
100 
101    VkMemoryAllocateInfo alloc_info = {
102       .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
103       .pNext =
104          &(VkImportMemoryFdInfoKHR){ .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
105                                      .handleType =
106                                         VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
107                                      .fd = fd },
108       .allocationSize = res->opaque_fd_metadata.allocation_size,
109       .memoryTypeIndex = res->opaque_fd_metadata.memory_type_index
110    };
111 
112    VkDeviceMemory mem_handle;
113    if (vkAllocateMemory(dev_handle, &alloc_info, NULL, &mem_handle) != VK_SUCCESS) {
114       close(fd);
115       return NULL;
116    }
117 
118    struct vkr_opaque_fd_mem_info *mem_info = calloc(1, sizeof(*mem_info));
119    if (!mem_info) {
120       vkFreeMemory(dev_handle, mem_handle, NULL);
121       return NULL;
122    }
123 
124    mem_info->device = dev_handle;
125    mem_info->device_memory = mem_handle;
126    mem_info->res_id = res->res_id;
127    mem_info->size = res->opaque_fd_metadata.allocation_size;
128 
129    list_addtail(&mem_info->head, &vkr_allocator.memories);
130 
131    return mem_info;
132 }
133 
134 void
vkr_allocator_fini(void)135 vkr_allocator_fini(void)
136 {
137    if (!vkr_allocator_initialized)
138       return;
139 
140    struct vkr_opaque_fd_mem_info *mem_info, *mem_info_temp;
141    LIST_FOR_EACH_ENTRY_SAFE (mem_info, mem_info_temp, &vkr_allocator.memories, head)
142       vkr_allocator_free_memory(mem_info);
143 
144    for (uint32_t i = 0; i < vkr_allocator.device_count; ++i) {
145       vkDestroyDevice(vkr_allocator.devices[i], NULL);
146    }
147    vkDestroyInstance(vkr_allocator.instance, NULL);
148 
149    memset(&vkr_allocator, 0, sizeof(vkr_allocator));
150 
151    vkr_allocator_initialized = false;
152 }
153 
/* One-time bring-up of the allocator: creates an instance, enumerates up
 * to VKR_ALLOCATOR_MAX_DEVICE_COUNT physical devices, records each
 * device's UUID (used later to match resources to devices), and creates a
 * logical device per physical device.
 *
 * Returns 0 on success, -1 on failure.  Note: this does NOT set
 * vkr_allocator_initialized; the caller (vkr_allocator_resource_map)
 * does that after a successful return.
 */
int
vkr_allocator_init(void)
{
   VkResult res;

   VkApplicationInfo app_info = {
      .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
      /* 1.1 needed for vkGetPhysicalDeviceProperties2 / device UUIDs */
      .apiVersion = VK_API_VERSION_1_1,
   };

   VkInstanceCreateInfo inst_info = {
      .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
      .pApplicationInfo = &app_info,
   };

   res = vkCreateInstance(&inst_info, NULL, &vkr_allocator.instance);
   if (res != VK_SUCCESS)
      goto fail;

   /* cap enumeration at our fixed-size arrays */
   vkr_allocator.device_count = VKR_ALLOCATOR_MAX_DEVICE_COUNT;

   /* VK_INCOMPLETE just means there were more devices than we asked for;
    * device_count is updated to the number actually written */
   res = vkEnumeratePhysicalDevices(vkr_allocator.instance, &vkr_allocator.device_count,
                                    vkr_allocator.physical_devices);
   if (res != VK_SUCCESS && res != VK_INCOMPLETE)
      goto fail;

   for (uint32_t i = 0; i < vkr_allocator.device_count; ++i) {
      VkPhysicalDevice physical_dev_handle = vkr_allocator.physical_devices[i];

      /* query the device UUID so resources can be matched to devices */
      VkPhysicalDeviceIDProperties id_props = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES
      };
      VkPhysicalDeviceProperties2 props2 = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, .pNext = &id_props
      };
      vkGetPhysicalDeviceProperties2(physical_dev_handle, &props2);

      memcpy(vkr_allocator.device_uuids[i], id_props.deviceUUID, VK_UUID_SIZE);

      float priority = 1.0;
      VkDeviceQueueCreateInfo queue_info = {
         .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
         /* Use any queue since we don't really need it.
          * We are guaranteed at least one by the spec */
         .queueFamilyIndex = 0,
         .queueCount = 1,
         .pQueuePriorities = &priority
      };

      VkDeviceCreateInfo dev_info = {
         .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
         .queueCreateInfoCount = 1,
         .pQueueCreateInfos = &queue_info,
      };

      res =
         vkCreateDevice(physical_dev_handle, &dev_info, NULL, &vkr_allocator.devices[i]);
      if (res != VK_SUCCESS)
         goto fail;
   }

   list_inithead(&vkr_allocator.memories);

   return 0;

fail:
   /* vkr_allocator has static storage, so not-yet-created handles are
    * VK_NULL_HANDLE and the destroy calls on them are no-ops */
   for (uint32_t i = 0; i < vkr_allocator.device_count; ++i) {
      vkDestroyDevice(vkr_allocator.devices[i], NULL);
   }
   vkDestroyInstance(vkr_allocator.instance, NULL);

   memset(&vkr_allocator, 0, sizeof(vkr_allocator));

   return -1;
}
229 
230 int
vkr_allocator_resource_map(struct virgl_resource * res,void ** map,uint64_t * out_size)231 vkr_allocator_resource_map(struct virgl_resource *res, void **map, uint64_t *out_size)
232 {
233    if (!vkr_allocator_initialized) {
234       if (vkr_allocator_init())
235          return -EINVAL;
236       vkr_allocator_initialized = true;
237    }
238 
239    assert(vkr_allocator_initialized);
240 
241    struct vkr_opaque_fd_mem_info *mem_info = vkr_allocator_allocate_memory(res);
242    if (!mem_info)
243       return -EINVAL;
244 
245    void *ptr;
246    if (vkMapMemory(mem_info->device, mem_info->device_memory, 0, mem_info->size, 0,
247                    &ptr) != VK_SUCCESS) {
248       vkr_allocator_free_memory(mem_info);
249       return -EINVAL;
250    }
251 
252    *map = ptr;
253    *out_size = mem_info->size;
254 
255    return 0;
256 }
257 
258 static struct vkr_opaque_fd_mem_info *
vkr_allocator_get_mem_info(struct virgl_resource * res)259 vkr_allocator_get_mem_info(struct virgl_resource *res)
260 {
261    struct vkr_opaque_fd_mem_info *mem_info, *mem_info_temp;
262    LIST_FOR_EACH_ENTRY_SAFE (mem_info, mem_info_temp, &vkr_allocator.memories, head)
263       if (mem_info->res_id == res->res_id)
264          return mem_info;
265 
266    return NULL;
267 }
268 
269 int
vkr_allocator_resource_unmap(struct virgl_resource * res)270 vkr_allocator_resource_unmap(struct virgl_resource *res)
271 {
272    assert(vkr_allocator_initialized);
273 
274    struct vkr_opaque_fd_mem_info *mem_info = vkr_allocator_get_mem_info(res);
275    if (!mem_info)
276       return -EINVAL;
277 
278    vkUnmapMemory(mem_info->device, mem_info->device_memory);
279 
280    vkr_allocator_free_memory(mem_info);
281 
282    return 0;
283 }
284