/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

/**
 * A priority of zero is the highest priority; higher numeric values are
 * lower priorities.
 */
struct fd_pipe *
fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
{
   struct fd_pipe *pipe;
   uint64_t val;

   if (id > FD_PIPE_MAX) {
      ERROR_MSG("invalid pipe id: %d", id);
      return NULL;
   }

   if ((prio != 1) && (fd_device_version(dev) < FD_VERSION_SUBMIT_QUEUES)) {
      ERROR_MSG("invalid priority!");
      return NULL;
   }

   pipe = dev->funcs->pipe_new(dev, id, prio);
   if (!pipe) {
      ERROR_MSG("allocation failed");
      return NULL;
   }

   pipe->dev = dev;
   pipe->id = id;
   p_atomic_set(&pipe->refcnt, 1);

   fd_pipe_get_param(pipe, FD_GPU_ID, &val);
   pipe->dev_id.gpu_id = val;

   fd_pipe_get_param(pipe, FD_CHIP_ID, &val);
   pipe->dev_id.chip_id = val;

   pipe->is_64bit = fd_dev_64b(&pipe->dev_id);

   /* Use the _FD_BO_NOSYNC flag because we don't want the control_mem bo to
    * hold a reference to ourselves.  This also means that we won't be able
    * to determine if the buffer is idle, which is needed by the bo-cache.
    * But pipe creation/destruction is not a high-frequency event.
    */
   pipe->control_mem = fd_bo_new(dev, sizeof(*pipe->control),
                                 FD_BO_CACHED_COHERENT | _FD_BO_NOSYNC,
                                 "pipe-control");
   pipe->control = fd_bo_map(pipe->control_mem);

   /* We could be getting a bo from the bo-cache, make sure the fence value
    * is not garbage:
    */
   pipe->control->fence = 0;
   pipe->control_mem->bo_reuse = NO_CACHE;

   return pipe;
}

struct fd_pipe *
fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
{
   return fd_pipe_new2(dev, id, 1);
}
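
/* Illustrative sketch (not from the original file, compiled out on purpose):
 * how the two constructors relate.  Assumes a valid fd_device; any priority
 * other than the default of 1 requires FD_VERSION_SUBMIT_QUEUES kernel
 * support, per the check in fd_pipe_new2() above.
 */
#if 0
static void
example_pipe_priorities(struct fd_device *dev)
{
   /* Default priority (prio == 1): */
   struct fd_pipe *normal = fd_pipe_new(dev, FD_PIPE_3D);

   /* Highest priority (prio == 0; zero is highest, larger is lower): */
   struct fd_pipe *high = fd_pipe_new2(dev, FD_PIPE_3D, 0);

   fd_pipe_del(high);
   fd_pipe_del(normal);
}
#endif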

struct fd_pipe *
fd_pipe_ref(struct fd_pipe *pipe)
{
   simple_mtx_lock(&fence_lock);
   fd_pipe_ref_locked(pipe);
   simple_mtx_unlock(&fence_lock);
   return pipe;
}

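/* The _locked variants are for callers that already hold fence_lock, e.g.
 * fd_fence_del_locked() dropping the fence's reference on its pipe:
 */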
struct fd_pipe *
fd_pipe_ref_locked(struct fd_pipe *pipe)
{
   simple_mtx_assert_locked(&fence_lock);
   pipe->refcnt++;
   return pipe;
}

void
fd_pipe_del(struct fd_pipe *pipe)
{
   simple_mtx_lock(&fence_lock);
   fd_pipe_del_locked(pipe);
   simple_mtx_unlock(&fence_lock);
}

void
fd_pipe_del_locked(struct fd_pipe *pipe)
{
   simple_mtx_assert_locked(&fence_lock);
   if (--pipe->refcnt)
      return;

   fd_bo_del(pipe->control_mem);
   pipe->funcs->destroy(pipe);
}

/**
 * Flush any unflushed deferred submits.  This is called at context-
 * destroy to make sure we don't leak unflushed submits.
 */
void
fd_pipe_purge(struct fd_pipe *pipe)
{
   struct fd_device *dev = pipe->dev;
   struct fd_fence *unflushed_fence = NULL;

   simple_mtx_lock(&dev->submit_lock);

   /* We only queue up deferred submits for a single pipe at a time, so
    * if there is a deferred_submits_fence on the same pipe as us, we
    * know we have deferred_submits queued, which need to be flushed:
    */
   if (dev->deferred_submits_fence && dev->deferred_submits_fence->pipe == pipe) {
      unflushed_fence = fd_fence_ref(dev->deferred_submits_fence);
   }

   simple_mtx_unlock(&dev->submit_lock);

   if (unflushed_fence) {
      fd_fence_flush(unflushed_fence);
      fd_fence_del(unflushed_fence);
   }
}

int
fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value)
{
   return pipe->funcs->get_param(pipe, param, value);
}

int
fd_pipe_set_param(struct fd_pipe *pipe, enum fd_param_id param, uint64_t value)
{
   return pipe->funcs->set_param(pipe, param, value);
}

const struct fd_dev_id *
fd_pipe_dev_id(struct fd_pipe *pipe)
{
   return &pipe->dev_id;
}

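/* Wait with an effectively unbounded timeout (~0 is the max uint64_t): */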
int
fd_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence)
{
   return fd_pipe_wait_timeout(pipe, fence, ~0);
}

int
fd_pipe_wait_timeout(struct fd_pipe *pipe, const struct fd_fence *fence,
                     uint64_t timeout)
{
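   /* Fast path: the GPU writes back completed fence values into the pipe's
    * control buffer (see fd_pipe_emit_fence()), so if the requested fence
    * is not after the last value seen there, it has already signaled:
    */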
   if (!fd_fence_after(fence->ufence, pipe->control->fence))
      return 0;

   if (!timeout)
      return -ETIMEDOUT;

   /* Make sure the submit producing this fence has reached the kernel
    * before asking the kernel to wait on it:
    */
   fd_pipe_flush(pipe, fence->ufence);

   return pipe->funcs->wait(pipe, fence, timeout);
}
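
/* Illustrative sketch (not from the original file, compiled out on purpose):
 * a zero timeout makes fd_pipe_wait_timeout() a non-blocking poll, returning
 * 0 if the fence has already signaled and -ETIMEDOUT otherwise.
 */
#if 0
static bool
example_fence_signaled(struct fd_pipe *pipe, const struct fd_fence *fence)
{
   return fd_pipe_wait_timeout(pipe, fence, 0) == 0;
}
#endif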

uint32_t
fd_pipe_emit_fence(struct fd_pipe *pipe, struct fd_ringbuffer *ring)
{
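   /* Emit a CACHE_FLUSH_TS event that writes the new fence value back into
    * the pipe's control buffer when preceding work completes.  64-bit GPUs
    * take a 64-bit address via a PKT7 packet, older 32-bit GPUs a 32-bit
    * address via PKT3:
    */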
   uint32_t fence = ++pipe->last_fence;

   if (pipe->is_64bit) {
      OUT_PKT7(ring, CP_EVENT_WRITE, 4);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS));
      OUT_RELOC(ring, control_ptr(pipe, fence));   /* ADDR_LO/HI */
      OUT_RING(ring, fence);
   } else {
      OUT_PKT3(ring, CP_EVENT_WRITE, 3);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS));
      OUT_RELOC(ring, control_ptr(pipe, fence));   /* ADDR */
      OUT_RING(ring, fence);
   }

   return fence;
}

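/**
 * Allocate a new fence, taking a reference on the pipe.  fence_fd starts
 * out as -1 (no out-fence fd yet); the 'ready' queue-fence signals once
 * the corresponding submit has been flushed to the kernel.
 */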
struct fd_fence *
fd_fence_new(struct fd_pipe *pipe, bool use_fence_fd)
{
   struct fd_fence *f = calloc(1, sizeof(*f));

   f->refcnt = 1;
   f->pipe = fd_pipe_ref(pipe);
   util_queue_fence_init(&f->ready);
   f->use_fence_fd = use_fence_fd;
   f->fence_fd = -1;

   return f;
}

struct fd_fence *
fd_fence_ref(struct fd_fence *f)
{
   simple_mtx_lock(&fence_lock);
   fd_fence_ref_locked(f);
   simple_mtx_unlock(&fence_lock);

   return f;
}

struct fd_fence *
fd_fence_ref_locked(struct fd_fence *f)
{
   simple_mtx_assert_locked(&fence_lock);
   f->refcnt++;
   return f;
}

void
fd_fence_del(struct fd_fence *f)
{
   simple_mtx_lock(&fence_lock);
   fd_fence_del_locked(f);
   simple_mtx_unlock(&fence_lock);
}

void
fd_fence_del_locked(struct fd_fence *f)
{
   simple_mtx_assert_locked(&fence_lock);

   if (--f->refcnt)
      return;

   fd_pipe_del_locked(f->pipe);

   if (f->use_fence_fd && (f->fence_fd != -1))
      close(f->fence_fd);

   free(f);
}

/**
 * Wait until the corresponding submit has been flushed to the kernel.
 */
void
fd_fence_flush(struct fd_fence *f)
{
   MESA_TRACE_FUNC();
   /*
    * TODO we could simplify this to remove the flush_sync part of
    * fd_pipe_sp_flush() and just rely on the util_queue_fence_wait()
    */
   fd_pipe_flush(f->pipe, f->ufence);
   util_queue_fence_wait(&f->ready);
}

int
fd_fence_wait(struct fd_fence *f)
{
   MESA_TRACE_FUNC();
   return fd_pipe_wait(f->pipe, f);
}