/*
 * Copyright © 2023 Collabora, Ltd.
 *
 * SPDX-License-Identifier: MIT
 */

#include <string.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/macros.h"
#include "pan_kmod.h"

extern const struct pan_kmod_ops panfrost_kmod_ops;

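/* Device creation dispatches on the kernel driver name reported by
 * drmGetVersion(): the table below maps that name to the pan_kmod_ops
 * implementing the kmod interface. Only "panfrost" is wired up for now;
 * a new backend is added by appending another { name, ops } pair.
 */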
static const struct {
   const char *name;
   const struct pan_kmod_ops *ops;
} drivers[] = {
   {
      "panfrost",
      &panfrost_kmod_ops,
   },
};

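/* Fallback allocator used when the caller passes a NULL allocator to
 * pan_kmod_dev_create(). It is a thin wrapper around ralloc and ignores
 * the transient hint.
 */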
static void *
default_zalloc(const struct pan_kmod_allocator *allocator, size_t size,
               UNUSED bool transient)
{
   return rzalloc_size(allocator, size);
}

static void
default_free(const struct pan_kmod_allocator *allocator, void *data)
{
   return ralloc_free(data);
}

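/* The default allocator is itself ralloc-allocated. pan_kmod_dev_create()
 * and pan_kmod_dev_destroy() detect it by comparing its zalloc hook against
 * default_zalloc, so it can be freed when the device goes away.
 */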
static const struct pan_kmod_allocator *
create_default_allocator(void)
{
   struct pan_kmod_allocator *allocator =
      rzalloc(NULL, struct pan_kmod_allocator);

   if (allocator) {
      allocator->zalloc = default_zalloc;
      allocator->free = default_free;
   }

   return allocator;
}

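/* Create a pan_kmod_dev from a DRM fd. The kernel driver is probed with
 * drmGetVersion() and matched against the drivers[] table; NULL is returned
 * if the driver is unknown, allocator creation fails, or the backend's
 * dev_create() fails. When allocator is NULL, a default ralloc-based
 * allocator is created and freed along with the device.
 */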
struct pan_kmod_dev *
pan_kmod_dev_create(int fd, uint32_t flags,
                    const struct pan_kmod_allocator *allocator)
{
   drmVersionPtr version = drmGetVersion(fd);
   struct pan_kmod_dev *dev = NULL;

   if (!version)
      return NULL;

   if (!allocator) {
      allocator = create_default_allocator();
      if (!allocator)
         goto out_free_version;
   }

   for (unsigned i = 0; i < ARRAY_SIZE(drivers); i++) {
      if (!strcmp(drivers[i].name, version->name)) {
         const struct pan_kmod_ops *ops = drivers[i].ops;

         dev = ops->dev_create(fd, flags, version, allocator);
         if (dev)
            goto out_free_version;

         break;
      }
   }

   /* Backend creation failed or no driver matched: release the default
    * allocator if we created one above.
    */
   if (allocator->zalloc == default_zalloc)
      ralloc_free((void *)allocator);

out_free_version:
   drmFreeVersion(version);
   return dev;
}

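/* Typical lifetime (a sketch; error handling elided, the render node path
 * is just an example, and flags == 0 assumes the caller keeps ownership of
 * the fd):
 *
 *    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *    struct pan_kmod_dev *dev = pan_kmod_dev_create(fd, 0, NULL);
 *
 *    if (dev) {
 *       ...
 *       pan_kmod_dev_destroy(dev);
 *    }
 *    close(fd);
 */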
void
pan_kmod_dev_destroy(struct pan_kmod_dev *dev)
{
   const struct pan_kmod_allocator *allocator = dev->allocator;

   dev->ops->dev_destroy(dev);

   /* If we created the allocator in pan_kmod_dev_create(), free it too. */
   if (allocator->zalloc == default_zalloc)
      ralloc_free((void *)allocator);
}

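/* BOs are tracked in dev->handle_to_bo, a sparse array indexed by GEM
 * handle and guarded by a lock. This lets pan_kmod_bo_import() return the
 * existing pan_kmod_bo when a dmabuf resolves to a handle we already know
 * about, instead of creating a duplicate wrapper.
 */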
struct pan_kmod_bo *
pan_kmod_bo_alloc(struct pan_kmod_dev *dev, struct pan_kmod_vm *exclusive_vm,
                  size_t size, uint32_t flags)
{
   struct pan_kmod_bo *bo;

   bo = dev->ops->bo_alloc(dev, exclusive_vm, size, flags);
   if (!bo)
      return NULL;

   /* We intentionally don't take the lock when filling the sparse array,
    * because we just created the BO and haven't exported it yet, so
    * there's no risk of imports racing with our BO insertion.
    */
   struct pan_kmod_bo **slot =
      util_sparse_array_get(&dev->handle_to_bo.array, bo->handle);

   if (!slot) {
      mesa_loge("failed to allocate slot in the handle_to_bo array");
      bo->dev->ops->bo_free(bo);
      return NULL;
   }

   assert(*slot == NULL);
   *slot = bo;
   return bo;
}

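/* Drop a reference on a BO. The last reference destroys it, but destruction
 * happens under the handle_to_bo lock: a concurrent import can legitimately
 * take a new reference between our decrement and the lock acquisition, in
 * which case the BO must survive.
 */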
void
pan_kmod_bo_put(struct pan_kmod_bo *bo)
{
   if (!bo)
      return;

   int32_t refcnt = p_atomic_dec_return(&bo->refcnt);

   assert(refcnt >= 0);

   if (refcnt)
      return;

   struct pan_kmod_dev *dev = bo->dev;

   simple_mtx_lock(&dev->handle_to_bo.lock);

   /* If some import took a ref on this BO while we were trying to acquire
    * the lock, skip the destruction.
    */
   if (!p_atomic_read(&bo->refcnt)) {
      struct pan_kmod_bo **slot = (struct pan_kmod_bo **)util_sparse_array_get(
         &dev->handle_to_bo.array, bo->handle);

      assert(slot);
      *slot = NULL;
      bo->dev->ops->bo_free(bo);
   }

   simple_mtx_unlock(&dev->handle_to_bo.lock);
}

static bool
pan_kmod_bo_check_import_flags(struct pan_kmod_bo *bo, uint32_t flags)
{
   uint32_t mask = PAN_KMOD_BO_FLAG_EXECUTABLE |
                   PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT | PAN_KMOD_BO_FLAG_NO_MMAP |
                   PAN_KMOD_BO_FLAG_GPU_UNCACHED;

   /* If the BO exists, make sure the import flags match the original flags. */
   return (bo->flags & mask) == (flags & mask);
}

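/* Import a BO from a dmabuf fd. The fd is first turned into a GEM handle;
 * if that handle is already tracked in handle_to_bo, the import flags are
 * validated against the existing BO and a reference is taken on it.
 * Otherwise a new BO is created, with its size queried by seeking to the
 * end of the dmabuf.
 */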
struct pan_kmod_bo *
pan_kmod_bo_import(struct pan_kmod_dev *dev, int fd, uint32_t flags)
{
   struct pan_kmod_bo *bo = NULL;
   struct pan_kmod_bo **slot;

   simple_mtx_lock(&dev->handle_to_bo.lock);

   uint32_t handle;
   int ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
   if (ret)
      goto err_unlock;

   slot = util_sparse_array_get(&dev->handle_to_bo.array, handle);
   if (!slot)
      goto err_close_handle;

   if (*slot) {
      if (!pan_kmod_bo_check_import_flags(*slot, flags)) {
         mesa_loge("invalid import flags");

         /* Don't close the handle here: it belongs to the BO we found in
          * the array.
          */
         goto err_unlock;
      }

      bo = *slot;

      p_atomic_inc(&bo->refcnt);
   } else {
      size_t size = lseek(fd, 0, SEEK_END);
      if (size == 0 || size == (size_t)-1) {
         mesa_loge("invalid dmabuf size");
         goto err_close_handle;
      }

      bo = dev->ops->bo_import(dev, handle, size, flags);
      if (!bo)
         goto err_close_handle;

      *slot = bo;
   }

   assert(p_atomic_read(&bo->refcnt) > 0);

   simple_mtx_unlock(&dev->handle_to_bo.lock);

   return bo;

err_close_handle:
   drmCloseBufferHandle(dev->fd, handle);

err_unlock:
   simple_mtx_unlock(&dev->handle_to_bo.lock);

   return NULL;
}
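
/* Import usage (a sketch; where the dmabuf fd comes from is up to the
 * caller, e.g. another process or API):
 *
 *    struct pan_kmod_bo *bo = pan_kmod_bo_import(dev, dmabuf_fd, 0);
 *
 *    if (bo) {
 *       ...
 *       pan_kmod_bo_put(bo);
 *    }
 */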