/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>

#include <nvif/cl826e.h>
#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct dma_fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

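/*
 * Signal the fence, remove it from the channel's pending list and drop the
 * list's reference to it.  Returns nonzero when the last uevent-enabled
 * fence on this context has completed, i.e. when the caller should drop
 * the non-stall interrupt notifier.  Must be called with fctx->lock held.
 */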
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	dma_fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	dma_fence_put(&fence->base);
	return drop;
}

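/*
 * Map a dma_fence back to its nouveau_fence, but only if it was emitted by
 * a channel of this device: fences with foreign ops, or whose fence
 * context falls outside this device's context range, are rejected.
 */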
static struct nouveau_fence *
nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
{
	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < drm->chan.context_base ||
	    fence->context >= drm->chan.context_base + drm->chan.nr)
		return NULL;

	return from_fence(fence);
}

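/*
 * Tear down a fence context: force-signal every pending fence, optionally
 * tagging each with an error code (e.g. -EIO after a channel fault), so
 * that waiters are released rather than left to time out.
 */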
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (error)
			dma_fence_set_error(&fence->base, error);

		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);
}

void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	nouveau_fence_context_kill(fctx, 0);
	nvif_notify_dtor(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

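/*
 * Read the channel's current sequence number and signal every pending
 * fence whose seqno it has reached.  The (int) cast keeps the comparison
 * correct across 32-bit sequence counter wraparound.  Returns nonzero
 * when the notifier reference should be dropped.  Caller holds fctx->lock.
 */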
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

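/*
 * Non-stall interrupt handler: the GPU raised the event we subscribed to,
 * so scan the pending list for newly completed fences.  Keep the notifier
 * armed unless the last interested fence has now been signalled.
 */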
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}

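/*
 * Initialise the per-channel fence state and, when the backend supports
 * uevents, subscribe to the non-stall interrupt.  The notifier only fires
 * once armed via nvif_notify_get(), which enable_signaling does later.
 */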
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = chan->drm->chan.context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_ctor(&chan->user, "fenceNonStallIntr",
			       nouveau_fence_wait_uevent_handler,
			       false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);

	WARN_ON(ret);
}

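/*
 * Initialise the dma_fence with the next sequence number and ask the
 * backend to emit it on the channel.  On success the fence joins the
 * pending list with an extra reference that nouveau_fence_signal() drops.
 */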
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	else
		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_dma_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		dma_fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

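/*
 * Check whether a fence has completed, polling the channel's sequence
 * number first so that fences which completed without raising an
 * interrupt are still picked up.
 */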
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return dma_fence_is_signaled(&fence->base);
}

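/*
 * Wait implementation for backends without usable completion interrupts:
 * poll the fence with an exponentially growing sleep, starting at 1us and
 * capped at 1ms, until it signals or the timeout expires.  Returns the
 * remaining jiffies on success, 0 on timeout.
 */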
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = sleep_time;
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

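/*
 * Busy-wait on a fence without ever sleeping; used for the non-lazy wait
 * path.  Bails out with -EBUSY once the fence's own timeout passes.
 */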
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

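/*
 * Wait for a fence with a fixed 15 second timeout.  With lazy set, the
 * core dma_fence wait (and thus the backend's interrupt machinery) is
 * used; otherwise the CPU spins.  Returns 0 on completion, -EBUSY on
 * timeout, or a signal-related error.
 */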
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

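/*
 * Make a channel's future work wait for the fences attached to a buffer
 * object.  Fences from the same channel are skipped, fences from other
 * channels of this device go through the backend's semaphore sync() hook
 * when possible, and anything else falls back to a CPU-side wait.  With
 * exclusive set, the shared fences are honoured as well.
 */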
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct dma_fence *fence;
	struct dma_resv *resv = nvbo->bo.base.resv;
	struct dma_resv_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		ret = dma_resv_reserve_shared(resv, 1);

		if (ret)
			return ret;
	}

	fobj = dma_resv_get_list(resv);
	fence = dma_resv_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						  dma_resv_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);
	}

	return ret;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		dma_fence_put(&(*pfence)->base);
	*pfence = NULL;
}

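/*
 * Allocate and emit a new fence on the given channel.  On failure the
 * fence is released and *pfence is set to NULL via nouveau_fence_unref().
 * An illustrative caller pattern (names hypothetical):
 *
 *	ret = nouveau_fence_new(chan, false, &fence);
 *	if (ret)
 *		return ret;
 *	... submit work that the fence orders ...
 *	nouveau_fence_unref(&fence);
 */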
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, read() would not assume the channel context is still
 * alive.  This function may be called from another device, running into
 * freed memory as a result.  The DRM node should still be there, so we
 * can derive the index from the fence context.
 */
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(kref_read(&fence->base.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		dma_fence_put(&fence->base);
		return false;
	}

	return true;
}

static void nouveau_fence_release(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	dma_fence_free(&fence->base);
}

static const struct dma_fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

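/*
 * Arm the non-stall interrupt the first time anyone asks for signaling on
 * this context, and mark the fence so nouveau_fence_signal() knows to
 * drop the notifier reference once it completes.
 */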
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

static const struct dma_fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.release = nouveau_fence_release
};