/*
 * Copyright © 2022 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#include "util/libsync.h"
#include "util/slab.h"

#include "freedreno_ringbuffer_sp.h"
#include "virtio_priv.h"

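/* Query a param value from the host kernel, via the tunneled
 * DRM_IOCTL_MSM_GET_PARAM ioctl:
 */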
static int
query_param(struct fd_pipe *pipe, uint32_t param, uint64_t *value)
{
   struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
   struct drm_msm_param req = {
      .pipe = virtio_pipe->pipe,
      .param = param,
   };
   int ret;

   ret = virtio_simple_ioctl(pipe->dev, DRM_IOCTL_MSM_GET_PARAM, &req);
   if (ret)
      return ret;

   *value = req.value;

   return 0;
}

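/* The fault count we report is the sum of global GPU faults and async
 * errors reported via shmem.  Prefer reading global_faults directly
 * from shmem, falling back to a GET_PARAM query on older hosts:
 */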
static int
query_faults(struct fd_pipe *pipe, uint64_t *value)
{
   struct virtio_device *virtio_dev = to_virtio_device(pipe->dev);
   uint32_t async_error = 0;
   uint64_t global_faults;

   if (vdrm_shmem_has_field(virtio_dev->shmem, async_error))
      async_error = virtio_dev->shmem->async_error;

   if (vdrm_shmem_has_field(virtio_dev->shmem, global_faults)) {
      global_faults = virtio_dev->shmem->global_faults;
   } else {
      int ret = query_param(pipe, MSM_PARAM_FAULTS, &global_faults);
      if (ret)
         return ret;
   }

   *value = global_faults + async_error;

   return 0;
}

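/* Params which are static for the lifetime of the pipe are answered from
 * the host capabilities snapshotted at pipe creation; the dynamic ones
 * require a round-trip to the host kernel:
 */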
static int
virtio_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
                      uint64_t *value)
{
   struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
   struct virtio_device *virtio_dev = to_virtio_device(pipe->dev);

   switch (param) {
   case FD_DEVICE_ID: // XXX probably get rid of this..
   case FD_GPU_ID:
      *value = virtio_pipe->gpu_id;
      return 0;
   case FD_GMEM_SIZE:
      *value = virtio_pipe->gmem;
      return 0;
   case FD_GMEM_BASE:
      *value = virtio_pipe->gmem_base;
      return 0;
   case FD_CHIP_ID:
      *value = virtio_pipe->chip_id;
      return 0;
   case FD_MAX_FREQ:
      *value = virtio_dev->vdrm->caps.u.msm.max_freq;
      return 0;
   case FD_TIMESTAMP:
      return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
   case FD_NR_PRIORITIES:
      *value = virtio_dev->vdrm->caps.u.msm.priorities;
      return 0;
   case FD_CTX_FAULTS:
   case FD_GLOBAL_FAULTS:
      return query_faults(pipe, value);
   case FD_SUSPEND_COUNT:
      return query_param(pipe, MSM_PARAM_SUSPENDS, value);
   case FD_VA_SIZE:
      *value = virtio_dev->vdrm->caps.u.msm.va_size;
      return 0;
   default:
      ERROR_MSG("invalid param id: %d", param);
      return -1;
   }
}

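/* Flush the retire queue, blocking until all queued retire work has
 * completed:
 */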
static void
virtio_pipe_finish(struct fd_pipe *pipe)
{
   struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
   if (util_queue_is_initialized(&virtio_pipe->retire_queue))
      util_queue_finish(&virtio_pipe->retire_queue);
}

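/* Wait for a fence to be signaled.  If the fence has an fd we can wait
 * on that directly, otherwise poll the host with synchronous WAIT_FENCE
 * requests until it signals or the timeout expires:
 */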
static int
virtio_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence, uint64_t timeout)
{
   MESA_TRACE_FUNC();
   struct vdrm_device *vdrm = to_virtio_device(pipe->dev)->vdrm;
   struct msm_ccmd_wait_fence_req req = {
      .hdr = MSM_CCMD(WAIT_FENCE, sizeof(req)),
      .queue_id = to_virtio_pipe(pipe)->queue_id,
      .fence = fence->kfence,
   };
   struct msm_ccmd_submitqueue_query_rsp *rsp;
   int64_t end_time = os_time_get_nano() + timeout;
   int ret;

   /* Do a non-blocking wait to trigger host-side wait-boost,
    * if the host kernel is new enough:
    */
   rsp = vdrm_alloc_rsp(vdrm, &req.hdr, sizeof(*rsp));
   ret = vdrm_send_req(vdrm, &req.hdr, false);
   if (ret)
      goto out;

   vdrm_flush(vdrm);

   if (fence->use_fence_fd)
      return sync_wait(fence->fence_fd, timeout / 1000000);

   do {
      rsp = vdrm_alloc_rsp(vdrm, &req.hdr, sizeof(*rsp));

      ret = vdrm_send_req(vdrm, &req.hdr, true);
      if (ret)
         goto out;

      /* Propagate the host's wait status before checking the deadline,
       * so an expired timeout returns -ETIMEDOUT rather than success:
       */
      ret = rsp->ret;

      if ((timeout != OS_TIMEOUT_INFINITE) &&
          (os_time_get_nano() >= end_time))
         break;
   } while (ret == -ETIMEDOUT);

out:
   return ret;
}

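/* Create a host submitqueue with the given flags: */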
static int
__open_submitqueue(struct fd_pipe *pipe, uint32_t prio, uint32_t flags)
{
   struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);

   struct drm_msm_submitqueue req = {
      .flags = flags,
      .prio = prio,
   };
   uint64_t nr_prio = 1;
   int ret;

   virtio_pipe_get_param(pipe, FD_NR_PRIORITIES, &nr_prio);

   /* Clamp the requested priority to the range supported by the host: */
   req.prio = MIN2(req.prio, MAX2(nr_prio, 1) - 1);

   ret = virtio_simple_ioctl(pipe->dev, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req);
   if (ret)
      return ret;

   virtio_pipe->queue_id = req.id;
   virtio_pipe->ring_idx = req.prio + 1;

   return 0;
}

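/* On a7xx+ first try to create a queue that allows preemption, falling
 * back to a non-preemptible queue on older kernels:
 */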
static int
open_submitqueue(struct fd_pipe *pipe, uint32_t prio)
{
   const struct fd_dev_info *info = fd_dev_info_raw(&pipe->dev_id);
   int ret = -1;

   if (info && info->chip >= A7XX)
      ret = __open_submitqueue(pipe, prio, MSM_SUBMITQUEUE_ALLOW_PREEMPT);

   /* If kernel doesn't support preemption, try again without: */
   if (ret)
      ret = __open_submitqueue(pipe, prio, 0);

   if (ret) {
      ERROR_MSG("could not create submitqueue! %d (%s)", ret, strerror(errno));
      return ret;
   }

   return 0;
}

static void
close_submitqueue(struct fd_pipe *pipe, uint32_t queue_id)
{
   virtio_simple_ioctl(pipe->dev, DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE, &queue_id);
}

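/* Destroy the pipe, draining any pending retire work first: */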
static void
virtio_pipe_destroy(struct fd_pipe *pipe)
{
   struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);

   if (util_queue_is_initialized(&virtio_pipe->retire_queue))
      util_queue_destroy(&virtio_pipe->retire_queue);

   close_submitqueue(pipe, virtio_pipe->queue_id);
   fd_pipe_sp_ringpool_fini(pipe);
   free(virtio_pipe);
}

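/* Table of pipe entrypoints used by the core freedreno code: */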
static const struct fd_pipe_funcs funcs = {
   .ringbuffer_new_object = fd_ringbuffer_sp_new_object,
   .submit_new = virtio_submit_new,
   .flush = fd_pipe_sp_flush,
   .finish = virtio_pipe_finish,
   .get_param = virtio_pipe_get_param,
   .wait = virtio_pipe_wait,
   .destroy = virtio_pipe_destroy,
};

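/* Create a new pipe.  Static params are taken from the host capabilities,
 * and a host submitqueue is created to submit against:
 */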
struct fd_pipe *
virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
{
   static const uint32_t pipe_id[] = {
      [FD_PIPE_3D] = MSM_PIPE_3D0,
      [FD_PIPE_2D] = MSM_PIPE_2D0,
   };
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   struct vdrm_device *vdrm = virtio_dev->vdrm;
   struct virtio_pipe *virtio_pipe = NULL;
   struct fd_pipe *pipe = NULL;

   virtio_pipe = calloc(1, sizeof(*virtio_pipe));
   if (!virtio_pipe) {
      ERROR_MSG("allocation failed");
      goto fail;
   }

   pipe = &virtio_pipe->base;

   pipe->funcs = &funcs;

   /* initialize before get_param(): */
   pipe->dev = dev;
   virtio_pipe->pipe = pipe_id[id];

   /* Snapshot the static params from the host capabilities: */
   virtio_pipe->gpu_id = vdrm->caps.u.msm.gpu_id;
   virtio_pipe->gmem = vdrm->caps.u.msm.gmem_size;
   virtio_pipe->gmem_base = vdrm->caps.u.msm.gmem_base;
   virtio_pipe->chip_id = vdrm->caps.u.msm.chip_id;

   if (!(virtio_pipe->gpu_id || virtio_pipe->chip_id))
      goto fail;

   util_queue_init(&virtio_pipe->retire_queue, "rq", 8, 1,
                   UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);

   INFO_MSG("Pipe Info:");
   INFO_MSG(" GPU-id: %d", virtio_pipe->gpu_id);
   INFO_MSG(" Chip-id: 0x%016"PRIx64, virtio_pipe->chip_id);
   INFO_MSG(" GMEM size: 0x%08x", virtio_pipe->gmem);

   if (open_submitqueue(pipe, prio))
      goto fail;

   fd_pipe_sp_ringpool_init(pipe);

   return pipe;

fail:
   if (pipe)
      fd_pipe_del(pipe);
   return NULL;
}