/*
 * Copyright © 2024 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */

#include "nvkmd.h"
#include "nouveau/nvkmd_nouveau.h"

#include <inttypes.h>

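/* Adds @mem to the device-wide list of tracked memory objects.  Tracking is
 * idempotent: an object whose list link is already initialized is left
 * alone, so calling this more than once on the same object is safe.
 */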
void
nvkmd_dev_track_mem(struct nvkmd_dev *dev,
                    struct nvkmd_mem *mem)
{
   if (mem->link.next == NULL) {
      simple_mtx_lock(&dev->mems_mutex);
      list_addtail(&mem->link, &dev->mems);
      simple_mtx_unlock(&dev->mems_mutex);
   }
}

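/* Removes @mem from the device-wide tracking list, if it was ever added. */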
static void
nvkmd_dev_untrack_mem(struct nvkmd_dev *dev,
                      struct nvkmd_mem *mem)
{
   if (mem->link.next != NULL) {
      simple_mtx_lock(&dev->mems_mutex);
      list_del(&mem->link);
      simple_mtx_unlock(&dev->mems_mutex);
   }
}

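/* Walks the tracked memory list looking for an object whose VA range
 * contains @addr.  Must be called with dev->mems_mutex held.  Returns a new
 * reference on success, or NULL if no tracked object covers the address.
 */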
static struct nvkmd_mem *
nvkmd_dev_lookup_mem_by_va_locked(struct nvkmd_dev *dev,
                                  uint64_t addr,
                                  uint64_t *offset_out)
{
   list_for_each_entry(struct nvkmd_mem, mem, &dev->mems, link) {
      if (mem->va == NULL || addr < mem->va->addr)
         continue;

      const uint64_t offset = addr - mem->va->addr;
      if (offset < mem->va->size_B) {
         if (offset_out != NULL)
            *offset_out = offset;
         return nvkmd_mem_ref(mem);
      }
   }

   return NULL;
}

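/* Locked wrapper around nvkmd_dev_lookup_mem_by_va_locked().  The returned
 * reference, if any, must be released with nvkmd_mem_unref().
 */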
struct nvkmd_mem *
nvkmd_dev_lookup_mem_by_va(struct nvkmd_dev *dev,
                           uint64_t addr,
                           uint64_t *offset_out)
{
   simple_mtx_lock(&dev->mems_mutex);
   struct nvkmd_mem *mem =
      nvkmd_dev_lookup_mem_by_va_locked(dev, addr, offset_out);
   simple_mtx_unlock(&dev->mems_mutex);
   return mem;
}

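/* Initializes the fields common to all nvkmd_mem implementations, including
 * the reference count (set to 1) and the map mutex.
 */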
void
nvkmd_mem_init(struct nvkmd_dev *dev,
               struct nvkmd_mem *mem,
               const struct nvkmd_mem_ops *ops,
               enum nvkmd_mem_flags flags,
               uint64_t size_B,
               uint32_t bind_align_B)
{
   *mem = (struct nvkmd_mem) {
      .ops = ops,
      .dev = dev,
      .refcnt = 1,
      .flags = flags,
      .bind_align_B = bind_align_B,
      .size_B = size_B,
   };

   simple_mtx_init(&mem->map_mutex, mtx_plain);
}

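/* Creates an nvkmd_pdev for the given DRM device.  Currently this always
 * dispatches to the nouveau back-end.
 */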
VkResult
nvkmd_try_create_pdev_for_drm(struct _drmDevice *drm_device,
                              struct vk_object_base *log_obj,
                              enum nvk_debug debug_flags,
                              struct nvkmd_pdev **pdev_out)
{
   return nvkmd_nouveau_try_create_pdev(drm_device, log_obj,
                                        debug_flags, pdev_out);
}

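/* Convenience wrapper which allocates memory with NVKMD_MEM_CAN_MAP set and
 * immediately maps it.  NVKMD_MEM_MAP_FIXED is not supported here since no
 * fixed address is taken.
 */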
VkResult
nvkmd_dev_alloc_mapped_mem(struct nvkmd_dev *dev,
                           struct vk_object_base *log_obj,
                           uint64_t size_B, uint64_t align_B,
                           enum nvkmd_mem_flags flags,
                           enum nvkmd_mem_map_flags map_flags,
                           struct nvkmd_mem **mem_out)
{
   struct nvkmd_mem *mem;
   VkResult result;

   result = nvkmd_dev_alloc_mem(dev, log_obj, size_B, align_B,
                                flags | NVKMD_MEM_CAN_MAP, &mem);
   if (result != VK_SUCCESS)
      return result;

   assert(!(map_flags & NVKMD_MEM_MAP_FIXED));
   result = nvkmd_mem_map(mem, log_obj, map_flags, NULL, NULL);
   if (result != VK_SUCCESS) {
      mem->ops->free(mem);
      return result;
   }

   *mem_out = mem;

   return VK_SUCCESS;
}

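/* Allocates a virtual address range from the device, optionally at a fixed
 * address, and logs the allocation when NVK_DEBUG_VM is set.
 */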
VkResult MUST_CHECK
nvkmd_dev_alloc_va(struct nvkmd_dev *dev,
                   struct vk_object_base *log_obj,
                   enum nvkmd_va_flags flags, uint8_t pte_kind,
                   uint64_t size_B, uint64_t align_B,
                   uint64_t fixed_addr, struct nvkmd_va **va_out)
{
   VkResult result = dev->ops->alloc_va(dev, log_obj, flags, pte_kind,
                                        size_B, align_B, fixed_addr, va_out);
   if (result != VK_SUCCESS)
      return result;

   if (unlikely(dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      const char *sparse = (flags & NVKMD_VA_SPARSE) ? " sparse" : "";
      fprintf(stderr, "alloc va [0x%" PRIx64 ", 0x%" PRIx64 ")%s\n",
              (*va_out)->addr, (*va_out)->addr + size_B, sparse);
   }

   return VK_SUCCESS;
}

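/* Frees a virtual address range, logging it first when NVK_DEBUG_VM is set. */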
void
nvkmd_va_free(struct nvkmd_va *va)
{
   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      const char *sparse = (va->flags & NVKMD_VA_SPARSE) ? " sparse" : "";
      fprintf(stderr, "free va [0x%" PRIx64 ", 0x%" PRIx64 ")%s\n",
              va->addr, va->addr + va->size_B, sparse);
   }

   va->ops->free(va);
}

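/* Debug logging helpers for NVK_DEBUG_VM. */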
static inline void
log_va_bind_mem(struct nvkmd_va *va,
                uint64_t va_offset_B,
                struct nvkmd_mem *mem,
                uint64_t mem_offset_B,
                uint64_t range_B)
{
   fprintf(stderr, "bind vma mem<0x%" PRIx32 ">"
                   "[0x%" PRIx64 ", 0x%" PRIx64 ") to "
                   "[0x%" PRIx64 ", 0x%" PRIx64 ")\n",
           mem->ops->log_handle(mem),
           mem_offset_B, mem_offset_B + range_B,
           va->addr + va_offset_B, va->addr + va_offset_B + range_B);
}

static inline void
log_va_unbind(struct nvkmd_va *va,
              uint64_t va_offset_B,
              uint64_t range_B)
{
   fprintf(stderr, "unbind vma [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
           va->addr + va_offset_B, va->addr + va_offset_B + range_B);
}

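/* Binds a range of @mem into a range of @va.  Both ranges must lie within
 * their respective objects and be aligned to the memory object's
 * bind_align_B.
 */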
VkResult MUST_CHECK
nvkmd_va_bind_mem(struct nvkmd_va *va,
                  struct vk_object_base *log_obj,
                  uint64_t va_offset_B,
                  struct nvkmd_mem *mem,
                  uint64_t mem_offset_B,
                  uint64_t range_B)
{
   assert(va_offset_B <= va->size_B);
   assert(va_offset_B + range_B <= va->size_B);
   assert(mem_offset_B <= mem->size_B);
   assert(mem_offset_B + range_B <= mem->size_B);

   assert(va->addr % mem->bind_align_B == 0);
   assert(va_offset_B % mem->bind_align_B == 0);
   assert(mem_offset_B % mem->bind_align_B == 0);
   assert(range_B % mem->bind_align_B == 0);

   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM))
      log_va_bind_mem(va, va_offset_B, mem, mem_offset_B, range_B);

   return va->ops->bind_mem(va, log_obj, va_offset_B,
                            mem, mem_offset_B, range_B);
}

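/* Unbinds a range of @va.  The range must lie within the VA allocation. */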
VkResult MUST_CHECK
nvkmd_va_unbind(struct nvkmd_va *va,
                struct vk_object_base *log_obj,
                uint64_t va_offset_B,
                uint64_t range_B)
{
   assert(va_offset_B <= va->size_B);
   assert(va_offset_B + range_B <= va->size_B);

   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM))
      log_va_unbind(va, va_offset_B, range_B);

   return va->ops->unbind(va, log_obj, va_offset_B, range_B);
}

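/* Submits a batch of bind/unbind operations on a context.  Each bind is
 * assert-checked against the same range and alignment rules as
 * nvkmd_va_bind_mem(); unbind operations must not reference a memory object.
 */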
VkResult MUST_CHECK
nvkmd_ctx_bind(struct nvkmd_ctx *ctx,
               struct vk_object_base *log_obj,
               uint32_t bind_count,
               const struct nvkmd_ctx_bind *binds)
{
   for (uint32_t i = 0; i < bind_count; i++) {
      assert(binds[i].va_offset_B <= binds[i].va->size_B);
      assert(binds[i].va_offset_B + binds[i].range_B <= binds[i].va->size_B);
      if (binds[i].op == NVKMD_BIND_OP_BIND) {
         assert(binds[i].mem_offset_B <= binds[i].mem->size_B);
         assert(binds[i].mem_offset_B + binds[i].range_B <=
                binds[i].mem->size_B);

         assert(binds[i].va->addr % binds[i].mem->bind_align_B == 0);
         assert(binds[i].va_offset_B % binds[i].mem->bind_align_B == 0);
         assert(binds[i].mem_offset_B % binds[i].mem->bind_align_B == 0);
         assert(binds[i].range_B % binds[i].mem->bind_align_B == 0);
      } else {
         assert(binds[i].mem == NULL);
      }
   }

   if (unlikely(ctx->dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      for (uint32_t i = 0; i < bind_count; i++) {
         if (binds[i].op == NVKMD_BIND_OP_BIND) {
            log_va_bind_mem(binds[i].va, binds[i].va_offset_B,
                            binds[i].mem, binds[i].mem_offset_B,
                            binds[i].range_B);
         } else {
            log_va_unbind(binds[i].va, binds[i].va_offset_B, binds[i].range_B);
         }
      }
   }

   return ctx->ops->bind(ctx, log_obj, bind_count, binds);
}

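/* Drops a reference to @mem.  When the last reference goes away, any
 * remaining client or internal mapping is torn down, the object is removed
 * from the device tracking list, and the memory is freed.
 */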
void
nvkmd_mem_unref(struct nvkmd_mem *mem)
{
   assert(p_atomic_read(&mem->refcnt) > 0);
   if (!p_atomic_dec_zero(&mem->refcnt))
      return;

   if (mem->client_map != NULL)
      mem->ops->unmap(mem, NVKMD_MEM_MAP_CLIENT, mem->client_map);

   if (mem->map != NULL)
      mem->ops->unmap(mem, 0, mem->map);

   nvkmd_dev_untrack_mem(mem->dev, mem);

   mem->ops->free(mem);
}

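/* Maps @mem either for the client (NVKMD_MEM_MAP_CLIENT, at most one such
 * mapping, optionally at a fixed address) or internally for the driver.
 * Internal mappings are reference-counted and shared, so repeated internal
 * map calls return the same CPU pointer.
 */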
VkResult
nvkmd_mem_map(struct nvkmd_mem *mem, struct vk_object_base *log_obj,
              enum nvkmd_mem_map_flags flags, void *fixed_addr,
              void **map_out)
{
   void *map = NULL;

   assert((fixed_addr == NULL) == !(flags & NVKMD_MEM_MAP_FIXED));

   if (flags & NVKMD_MEM_MAP_CLIENT) {
      assert(mem->client_map == NULL);

      VkResult result = mem->ops->map(mem, log_obj, flags, fixed_addr, &map);
      if (result != VK_SUCCESS)
         return result;

      mem->client_map = map;
   } else {
      assert(!(flags & NVKMD_MEM_MAP_FIXED));

      simple_mtx_lock(&mem->map_mutex);

      assert((mem->map_cnt == 0) == (mem->map == NULL));

      VkResult result = VK_SUCCESS;
      if (mem->map == NULL) {
         /* We always map read/write for internal maps since they're reference
          * counted and otherwise we don't have a good way to add permissions
          * after the fact.
          */
         result = mem->ops->map(mem, log_obj, NVKMD_MEM_MAP_RDWR, NULL, &map);
         if (result == VK_SUCCESS)
            mem->map = map;
      } else {
         map = mem->map;
      }

      /* Only take an internal map reference once the map has actually
       * succeeded so map_cnt stays in sync with mem->map on failure.
       */
      if (result == VK_SUCCESS)
         mem->map_cnt++;

      simple_mtx_unlock(&mem->map_mutex);

      if (result != VK_SUCCESS)
         return result;
   }

   if (map_out != NULL)
      *map_out = map;

   return VK_SUCCESS;
}

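/* Undoes a previous nvkmd_mem_map().  Client mappings are torn down
 * immediately; internal mappings are only unmapped once the last internal
 * user has called this function.
 */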
void
nvkmd_mem_unmap(struct nvkmd_mem *mem, enum nvkmd_mem_map_flags flags)
{
   if (flags & NVKMD_MEM_MAP_CLIENT) {
      assert(mem->client_map != NULL);
      mem->ops->unmap(mem, flags, mem->client_map);
      mem->client_map = NULL;
   } else {
      assert(mem->map != NULL);
      simple_mtx_lock(&mem->map_mutex);
      if (--mem->map_cnt == 0) {
         mem->ops->unmap(mem, flags, mem->map);
         mem->map = NULL;
      }
      simple_mtx_unlock(&mem->map_mutex);
   }
}