/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_DRMIF_H_
#define FREEDRENO_DRMIF_H_

#include <stdint.h>

#include "util/bitset.h"
#include "util/u_debug.h"

#ifdef __cplusplus
extern "C" {
#endif

struct fd_bo;
struct fd_pipe;
struct fd_device;

enum fd_pipe_id {
   FD_PIPE_3D = 1,
   FD_PIPE_2D = 2,
   /* some devices have two 2d blocks.. not really sure how to
    * use that yet, so just ignoring the 2nd 2d pipe for now
    */
   FD_PIPE_MAX
};

enum fd_param_id {
   FD_DEVICE_ID,
   FD_GMEM_SIZE,
   FD_GMEM_BASE,     /* 64b */
   FD_GPU_ID,
   FD_CHIP_ID,       /* 64b */
   FD_MAX_FREQ,
   FD_TIMESTAMP,
   FD_NR_RINGS,      /* # of rings == # of distinct priority levels */
   FD_PP_PGTABLE,    /* are per-process pagetables used for the pipe/ctx */
   FD_CTX_FAULTS,    /* # of per context faults */
   FD_GLOBAL_FAULTS, /* # of global (all context) faults */
   FD_SUSPEND_COUNT, /* # of times the GPU has suspended, and potentially lost state */
};

/**
 * Helper for fence/seqno comparisons which deals properly with rollover.
 * Returns true if fence 'a' is before fence 'b'.
 */
static inline bool
fd_fence_before(uint32_t a, uint32_t b)
{
   return (int32_t)(a - b) < 0;
}

static inline bool
fd_fence_after(uint32_t a, uint32_t b)
{
   return (int32_t)(a - b) > 0;
}
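
/* A worked example of the rollover handling above (illustrative, not part
 * of the original header): fd_fence_before(0xfffffffe, 0x2) is true, since
 * the unsigned difference 0xfffffffc reinterpreted as int32_t is -4, i.e.
 * fence 'a' was issued just before 'b' even though the counter wrapped.
 */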

/**
 * Per submit, there are actually two fences:
 *  1) The userspace maintained fence, which is used to optimistically
 *     avoid kernel ioctls to query if specific rendering is completed
 *  2) The kernel maintained fence, which we cannot directly do anything
 *     with, other than pass it back to the kernel
 *
 * The userspace fence is mostly internal to the drm layer, but we want
 * the gallium layer to be able to pass it back to us for things like
 * fd_pipe_wait().  So this struct encapsulates the two.
 */
struct fd_fence {
   uint32_t kfence; /* kernel fence */
   uint32_t ufence; /* userspace fence */
};
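
/* A minimal usage sketch (illustrative, not part of the original header):
 * the gallium layer keeps the struct fd_fence associated with a submit and
 * later hands it back to fd_pipe_wait()/fd_pipe_wait_timeout() declared
 * below, letting the drm layer use the userspace fence to skip the kernel
 * ioctl when the rendering is already known to be complete:
 *
 *    struct fd_fence last_fence;   // filled in by the submit path
 *    ...
 *    if (fd_pipe_wait(pipe, &last_fence) != 0) {
 *       // wait failed (e.g. GPU hang); handle the error
 *    }
 */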

/* bo flags: */
#define FD_BO_GPUREADONLY     BITSET_BIT(1)
#define FD_BO_SCANOUT         BITSET_BIT(2)
#define FD_BO_CACHED_COHERENT BITSET_BIT(3)
/* Default caching is WRITECOMBINE */

/* bo access flags: (keep aligned to MSM_PREP_x) */
#define FD_BO_PREP_READ   BITSET_BIT(0)
#define FD_BO_PREP_WRITE  BITSET_BIT(1)
#define FD_BO_PREP_NOSYNC BITSET_BIT(2)
#define FD_BO_PREP_FLUSH  BITSET_BIT(3)

/* device functions:
 */

struct fd_device *fd_device_new(int fd);
struct fd_device *fd_device_new_dup(int fd);
struct fd_device *fd_device_ref(struct fd_device *dev);
void fd_device_purge(struct fd_device *dev);
void fd_device_del(struct fd_device *dev);
int fd_device_fd(struct fd_device *dev);
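
/* A minimal device-setup sketch (illustrative; the render-node path and
 * error handling are assumptions, not part of the original header):
 *
 *    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *    struct fd_device *dev = fd_device_new(fd);
 *    if (!dev)
 *       return -1;
 *    ...
 *    fd_device_del(dev);
 *
 * fd_device_new_dup() behaves like fd_device_new() but operates on its own
 * dup() of the file descriptor, so the caller remains free to use and close
 * the original fd independently.
 */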

enum fd_version {
   FD_VERSION_MADVISE = 1,         /* kernel supports madvise */
   FD_VERSION_UNLIMITED_CMDS = 1,  /* submits w/ >4 cmd buffers (growable ringbuffer) */
   FD_VERSION_FENCE_FD = 2,        /* submit command supports in/out fences */
   FD_VERSION_GMEM_BASE = 3,       /* supports querying GMEM base address */
   FD_VERSION_SUBMIT_QUEUES = 3,   /* submit queues and multiple priority levels */
   FD_VERSION_BO_IOVA = 3,         /* supports fd_bo_get/put_iova() */
   FD_VERSION_SOFTPIN = 4,         /* adds softpin, bo name, and dump flag */
   FD_VERSION_ROBUSTNESS = 5,      /* adds FD_PP_PGTABLE and FD_CTX_FAULTS/FD_GLOBAL_FAULTS */
   FD_VERSION_MEMORY_FD = 2,       /* supports shared memory objects */
   FD_VERSION_SUSPENDS = 7,        /* Adds MSM_PARAM_SUSPENDS to detect device suspend */
   FD_VERSION_CACHED_COHERENT = 8, /* Adds cached-coherent support (a6xx+) */
};
enum fd_version fd_device_version(struct fd_device *dev);
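
/* Illustrative feature check (a sketch, not from the original header):
 *
 *    if (fd_device_version(dev) >= FD_VERSION_SOFTPIN) {
 *       // kernel supports softpin, bo names and the dump flag
 *    }
 */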

bool fd_has_syncobj(struct fd_device *dev);

/* pipe functions:
 */

struct fd_pipe *fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id);
struct fd_pipe *fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id,
                             uint32_t prio);
struct fd_pipe *fd_pipe_ref(struct fd_pipe *pipe);
struct fd_pipe *fd_pipe_ref_locked(struct fd_pipe *pipe);
void fd_pipe_del(struct fd_pipe *pipe);
void fd_pipe_purge(struct fd_pipe *pipe);
const struct fd_dev_id *fd_pipe_dev_id(struct fd_pipe *pipe);
int fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
                      uint64_t *value);
int fd_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence);
/* timeout in nanosec */
int fd_pipe_wait_timeout(struct fd_pipe *pipe, const struct fd_fence *fence,
                         uint64_t timeout);
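
/* A minimal pipe-usage sketch (illustrative; error handling omitted and the
 * fence is assumed to come from an earlier submit):
 *
 *    struct fd_pipe *pipe = fd_pipe_new(dev, FD_PIPE_3D);
 *    uint64_t gpu_id;
 *    fd_pipe_get_param(pipe, FD_GPU_ID, &gpu_id);
 *    ...
 *    // wait at most 1ms for previously submitted rendering:
 *    int ret = fd_pipe_wait_timeout(pipe, &fence, 1000000);
 *    fd_pipe_del(pipe);
 *
 * fd_pipe_new2() additionally takes a priority level; the number of distinct
 * levels can be queried via the FD_NR_RINGS param.
 */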

/* buffer-object functions:
 */

struct fd_bo *_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags);
void _fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap);

static inline void fd_bo_set_name(struct fd_bo *bo, const char *fmt, ...)
   _util_printf_format(2, 3);

static inline void
fd_bo_set_name(struct fd_bo *bo, const char *fmt, ...)
{
#ifndef NDEBUG
   va_list ap;
   va_start(ap, fmt);
   _fd_bo_set_name(bo, fmt, ap);
   va_end(ap);
#endif
}

static inline struct fd_bo *fd_bo_new(struct fd_device *dev, uint32_t size,
                                      uint32_t flags, const char *fmt, ...)
   _util_printf_format(4, 5);

static inline struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags, const char *fmt,
          ...)
{
   struct fd_bo *bo = _fd_bo_new(dev, size, flags);
#ifndef NDEBUG
   if (fmt) {
      va_list ap;
      va_start(ap, fmt);
      _fd_bo_set_name(bo, fmt, ap);
      va_end(ap);
   }
#endif
   return bo;
}
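
/* Typical call (illustrative): the name is printf-formatted and only
 * retained in debug builds, e.g.:
 *
 *    struct fd_bo *bo = fd_bo_new(dev, 4096, 0, "query result %u", idx);
 */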

struct fd_bo *fd_bo_from_handle(struct fd_device *dev, uint32_t handle,
                                uint32_t size);
struct fd_bo *fd_bo_from_name(struct fd_device *dev, uint32_t name);
struct fd_bo *fd_bo_from_dmabuf(struct fd_device *dev, int fd);
void fd_bo_mark_for_dump(struct fd_bo *bo);
uint64_t fd_bo_get_iova(struct fd_bo *bo);
struct fd_bo *fd_bo_ref(struct fd_bo *bo);
void fd_bo_del(struct fd_bo *bo);
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name);
uint32_t fd_bo_handle(struct fd_bo *bo);
int fd_bo_dmabuf(struct fd_bo *bo);
uint32_t fd_bo_size(struct fd_bo *bo);
void *fd_bo_map(struct fd_bo *bo);
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
void fd_bo_cpu_fini(struct fd_bo *bo);
bool fd_bo_is_cached(struct fd_bo *bo);
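
/* A minimal CPU-access sketch (illustrative; error handling omitted): map
 * the bo, bracket the CPU access with prep/fini to synchronize against the
 * GPU, then drop the reference:
 *
 *    uint32_t *ptr = fd_bo_map(bo);
 *    fd_bo_cpu_prep(bo, pipe, FD_BO_PREP_WRITE);
 *    ptr[0] = 0xdeadbeef;
 *    fd_bo_cpu_fini(bo);
 *    fd_bo_del(bo);
 */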

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* FREEDRENO_DRMIF_H_ */