/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_PRIV_H_
#define FREEDRENO_PRIV_H_

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <xf86drm.h>

#include "util/hash_table.h"
#include "util/list.h"
#include "util/log.h"
#include "util/simple_mtx.h"
#include "util/u_atomic.h"
#include "util/u_debug.h"
#include "util/u_math.h"

#include "freedreno_dev_info.h"
#include "freedreno_drmif.h"
#include "freedreno_ringbuffer.h"

extern simple_mtx_t table_lock;

/*
 * Stupid/simple growable array implementation:
 */

#define MAX_ARRAY_SIZE ((unsigned short)~0)

static inline void
grow(void **ptr, uint16_t nr, uint16_t *max, uint16_t sz)
{
   assert((nr + 1) < MAX_ARRAY_SIZE);
   if ((nr + 1) > *max) {
      if (*max > MAX_ARRAY_SIZE / 2)
         *max = MAX_ARRAY_SIZE;
      else if ((*max * 2) < (nr + 1))
         *max = nr + 5;
      else
         *max = *max * 2;
      *ptr = realloc(*ptr, *max * sz);
   }
}

#define DECLARE_ARRAY(type, name) \
   unsigned short nr_##name, max_##name; \
   type *name;

#define APPEND(x, name, ...) \
   ({ \
      grow((void **)&(x)->name, (x)->nr_##name, &(x)->max_##name, \
           sizeof((x)->name[0])); \
      (x)->name[(x)->nr_##name] = __VA_ARGS__; \
      (x)->nr_##name++; \
   })
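
/* A minimal usage sketch (illustrative only; "struct foo", "f" and "bo" are
 * hypothetical names, not defined in this header).  DECLARE_ARRAY() embeds
 * the count/capacity/pointer triple in a struct, and APPEND() grows the
 * array as needed and evaluates to the index the element was stored at:
 *
 *    struct foo {
 *       DECLARE_ARRAY(struct fd_bo *, bos);
 *    };
 *
 *    struct foo f = {0};
 *    unsigned idx = APPEND(&f, bos, bo);   // now f.bos[idx] == bo
 */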

/* Read a value exactly once, preventing the compiler from caching it in a
 * register or re-reading it (for lock-free reads of fields that may be
 * updated by another thread):
 */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct fd_device_funcs {
   int (*bo_new_handle)(struct fd_device *dev, uint32_t size, uint32_t flags,
                        uint32_t *handle);
   struct fd_bo *(*bo_from_handle)(struct fd_device *dev, uint32_t size,
                                   uint32_t handle);
   struct fd_pipe *(*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
                               unsigned prio);
   void (*destroy)(struct fd_device *dev);
};
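
/* Each kernel-specific backend supplies its own fd_device_funcs table.  A
 * hedged sketch of how a backend might wire this up (the "msm_*" names are
 * illustrative assumptions, not definitions from this header):
 *
 *    static const struct fd_device_funcs funcs = {
 *       .bo_new_handle = msm_bo_new_handle,
 *       .bo_from_handle = msm_bo_from_handle,
 *       .pipe_new = msm_pipe_new,
 *       .destroy = msm_device_destroy,
 *    };
 */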

struct fd_bo_bucket {
   uint32_t size;
   struct list_head list;
};

struct fd_bo_cache {
   struct fd_bo_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;
};

struct fd_device {
   int fd;
   enum fd_version version;
   int32_t refcnt;

   /* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
    *
    *   handle_table: maps handle to fd_bo
    *   name_table: maps flink name to fd_bo
    *
    * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
    * returns a new handle.  So we need to figure out if the bo is already
    * open in the process first, before calling gem-open.
    */
   struct hash_table *handle_table, *name_table;

   const struct fd_device_funcs *funcs;

   struct fd_bo_cache bo_cache;
   struct fd_bo_cache ring_cache;

   bool has_cached_coherent;

   bool closefd; /* call close(fd) upon destruction */

   /* just for valgrind: */
   int bo_size;

   /**
    * List of deferred submits, protected by submit_lock.  The deferred
    * submits are tracked globally per-device, to preserve the order in
    * which they are passed off to the kernel, even if they execute in a
    * different order on the kernel side (i.e. due to different-priority
    * submitqueues, etc).  Once the kernel has them, it is the fences' job
    * to preserve the correct order of execution.
    */
   struct list_head deferred_submits;
   unsigned deferred_cmds;
   simple_mtx_t submit_lock;
};

#define foreach_submit(name, list) \
   list_for_each_entry(struct fd_submit, name, list, node)
#define foreach_submit_safe(name, list) \
   list_for_each_entry_safe(struct fd_submit, name, list, node)
#define last_submit(list) \
   list_last_entry(list, struct fd_submit, node)
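
/* Illustrative usage, assuming dev->submit_lock is already held (see
 * fd_dev_count_deferred_cmds() below for the same iteration pattern
 * written out with list_for_each_entry() directly):
 *
 *    foreach_submit (submit, &dev->deferred_submits) {
 *       // visit each deferred fd_submit in the order it will be flushed
 *    }
 */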

void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
struct fd_bo *fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size,
                                uint32_t flags);
int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);

/* for callers where @table_lock is already held: */
void fd_bo_del_locked(struct fd_bo *bo);
void fd_device_del_locked(struct fd_device *dev);
void fd_pipe_del_locked(struct fd_pipe *pipe);

struct fd_pipe_funcs {
   struct fd_ringbuffer *(*ringbuffer_new_object)(struct fd_pipe *pipe,
                                                  uint32_t size);
   struct fd_submit *(*submit_new)(struct fd_pipe *pipe);

   /**
    * Flush any deferred submits (if deferred submits are supported by
    * the pipe implementation)
    */
   void (*flush)(struct fd_pipe *pipe, uint32_t fence);

   int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param,
                    uint64_t *value);
   int (*wait)(struct fd_pipe *pipe, const struct fd_fence *fence,
               uint64_t timeout);
   void (*destroy)(struct fd_pipe *pipe);
};

struct fd_pipe_control {
   uint32_t fence;
};
#define control_ptr(pipe, member) \
   (pipe)->control_mem, offsetof(struct fd_pipe_control, member), 0, 0
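
/* control_ptr() expands to a four-value argument tuple (bo, offset, 0, 0),
 * so it can be splatted into emit helpers that take (bo, offset, or, shift)
 * style parameters.  An illustrative use with an OUT_RELOC()-style macro
 * (the macro name here is an assumption, not defined in this header):
 *
 *    OUT_RELOC(ring, control_ptr(pipe, fence));
 *    // equivalent to:
 *    // OUT_RELOC(ring, pipe->control_mem,
 *    //           offsetof(struct fd_pipe_control, fence), 0, 0);
 */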

struct fd_pipe {
   struct fd_device *dev;
   enum fd_pipe_id id;
   struct fd_dev_id dev_id;

   /**
    * Note refcnt is *not* atomic, but protected by table_lock, since the
    * table_lock is held in fd_bo_add_fence(), which is the hotpath.
    */
   int32_t refcnt;

   /**
    * Previous fence seqno allocated for this pipe.  The fd_pipe represents
    * a single timeline: fences allocated by this pipe can be compared to
    * each other, but fences from different pipes are not comparable (as
    * there could be preemption of multiple priority-level submitqueues at
    * play).
    */
   uint32_t last_fence;

   struct fd_bo *control_mem;
   volatile struct fd_pipe_control *control;

   const struct fd_pipe_funcs *funcs;
};

uint32_t fd_pipe_emit_fence(struct fd_pipe *pipe, struct fd_ringbuffer *ring);

static inline void
fd_pipe_flush(struct fd_pipe *pipe, uint32_t fence)
{
   if (!pipe->funcs->flush)
      return;
   pipe->funcs->flush(pipe, fence);
}

struct fd_submit_funcs {
   struct fd_ringbuffer *(*new_ringbuffer)(struct fd_submit *submit,
                                           uint32_t size,
                                           enum fd_ringbuffer_flags flags);
   int (*flush)(struct fd_submit *submit, int in_fence_fd,
                struct fd_submit_fence *out_fence);
   void (*destroy)(struct fd_submit *submit);
};

struct fd_submit {
   int32_t refcnt;
   struct fd_pipe *pipe;
   const struct fd_submit_funcs *funcs;

   struct fd_ringbuffer *primary;
   uint32_t fence;
   struct list_head node; /* node in fd_device::deferred_submits */
};

static inline unsigned
fd_dev_count_deferred_cmds(struct fd_device *dev)
{
   unsigned nr = 0;

   simple_mtx_assert_locked(&dev->submit_lock);

   list_for_each_entry (struct fd_submit, submit, &dev->deferred_submits, node) {
      nr += fd_ringbuffer_cmd_count(submit->primary);
   }

   return nr;
}
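
/* A hedged sketch of how a caller might use the count to decide when the
 * deferred-submit queue has grown enough to be worth flushing (the
 * threshold name is illustrative, not from this header):
 *
 *    simple_mtx_lock(&dev->submit_lock);
 *    bool should_flush = fd_dev_count_deferred_cmds(dev) > MAX_DEFERRED_CMDS;
 *    simple_mtx_unlock(&dev->submit_lock);
 */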

struct fd_bo_funcs {
   int (*offset)(struct fd_bo *bo, uint64_t *offset);
   int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
   void (*cpu_fini)(struct fd_bo *bo);
   int (*madvise)(struct fd_bo *bo, int willneed);
   uint64_t (*iova)(struct fd_bo *bo);
   void (*set_name)(struct fd_bo *bo, const char *fmt, va_list ap);
   void (*destroy)(struct fd_bo *bo);
};

struct fd_bo_fence {
   /* For non-shared buffers, track the last pipe the buffer was active
    * on, and the per-pipe fence value that indicates when the buffer is
    * idle:
    */
   uint32_t fence;
   struct fd_pipe *pipe;
};

struct fd_bo {
   struct fd_device *dev;
   uint32_t size;
   uint32_t handle;
   uint32_t name;
   int32_t refcnt;
   uint32_t reloc_flags; /* flags like FD_RELOC_DUMP to use for relocs to this BO */
   uint32_t alloc_flags; /* flags that control allocation/mapping, i.e. FD_BO_x */
   uint64_t iova;
   void *map;
   const struct fd_bo_funcs *funcs;

   enum {
      NO_CACHE = 0,
      BO_CACHE = 1,
      RING_CACHE = 2,
   } bo_reuse : 2;

   /* Buffers that are shared (imported or exported) may be used in
    * other processes, so we need to fall back to the kernel to determine
    * busyness.
    */
   bool shared : 1;

   /* We need to be able to disable userspace fence synchronization for
    * special internal buffers, namely the pipe->control buffer, to avoid
    * a circular reference loop.
    */
   bool nosync : 1;

   struct list_head list;  /* bucket-list entry */
   time_t free_time;       /* time when added to bucket-list */

   DECLARE_ARRAY(struct fd_bo_fence, fences);

   /* In the common case, there is no more than one fence attached.
    * This provides storage for the fences table until it grows to
    * be larger than a single element.
    */
   struct fd_bo_fence _inline_fence;
};

void fd_bo_add_fence(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t fence);

enum fd_bo_state {
   FD_BO_STATE_IDLE,
   FD_BO_STATE_BUSY,
   FD_BO_STATE_UNKNOWN,
};
enum fd_bo_state fd_bo_state(struct fd_bo *bo);
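
/* Illustrative sketch of using the userspace fence state to short-circuit a
 * kernel round-trip.  FD_BO_STATE_UNKNOWN covers bos where userspace cannot
 * know (e.g. shared buffers, per the comment in struct fd_bo above), so only
 * the kernel can answer; the helper shown is an assumption, not part of
 * this header:
 *
 *    if (fd_bo_state(bo) == FD_BO_STATE_IDLE)
 *       return false;               // known idle, skip the ioctl
 *    return ask_kernel_if_busy(bo); // BUSY or UNKNOWN: ask the kernel
 */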

struct fd_bo *fd_bo_new_ring(struct fd_device *dev, uint32_t size);

#define enable_debug 0 /* TODO make dynamic */

bool fd_dbg(void);

#define INFO_MSG(fmt, ...) \
   do { \
      if (fd_dbg()) \
         mesa_logi("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
   } while (0)
#define DEBUG_MSG(fmt, ...) \
   do { \
      if (enable_debug) { \
         mesa_logd("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
      } \
   } while (0)
#define WARN_MSG(fmt, ...) \
   do { \
      mesa_logw("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
   } while (0)
#define ERROR_MSG(fmt, ...) \
   do { \
      mesa_loge("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
   } while (0)

#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
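
/* These round-trip a pointer through a uint64_t, e.g. for stashing pointers
 * in 64-bit ioctl fields.  Illustrative use (variable names assumed):
 *
 *    uint64_t token = VOID2U64(submit);
 *    struct fd_submit *submit2 = U642VOID(token);   // submit2 == submit
 */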

#if HAVE_VALGRIND
#include <memcheck.h>

/*
 * For tracking the backing memory (if valgrind is enabled, we force a mmap
 * for the purposes of tracking)
 */
static inline void
VG_BO_ALLOC(struct fd_bo *bo)
{
   if (bo && RUNNING_ON_VALGRIND) {
      VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
   }
}

static inline void
VG_BO_FREE(struct fd_bo *bo)
{
   VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}

/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * bo.
 *
 * Note that the list_head in fd_bo is used to track the buffers in the
 * cache, so we disable error reporting on the range while they are in the
 * cache, to keep valgrind from squawking about the list traversal.
 */
static inline void
VG_BO_RELEASE(struct fd_bo *bo)
{
   if (RUNNING_ON_VALGRIND) {
      VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
      VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
      VALGRIND_FREELIKE_BLOCK(bo->map, 0);
   }
}
static inline void
VG_BO_OBTAIN(struct fd_bo *bo)
{
   if (RUNNING_ON_VALGRIND) {
      VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
      VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
      VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
   }
}
#else
static inline void
VG_BO_ALLOC(struct fd_bo *bo)
{
}
static inline void
VG_BO_FREE(struct fd_bo *bo)
{
}
static inline void
VG_BO_RELEASE(struct fd_bo *bo)
{
}
static inline void
VG_BO_OBTAIN(struct fd_bo *bo)
{
}
#endif

#define FD_DEFINE_CAST(parent, child) \
   static inline struct child *to_##child(struct parent *x) \
   { \
      return (struct child *)x; \
   }
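
/* Illustrative use by a backend (the "msm" names are assumptions here, not
 * definitions from this header): downcast from the generic struct to a
 * backend-specific wrapper that embeds it as its first member:
 *
 *    FD_DEFINE_CAST(fd_bo, msm_bo);          // defines to_msm_bo()
 *    struct msm_bo *mbo = to_msm_bo(bo);
 */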

#endif /* FREEDRENO_PRIV_H_ */