Lines matching defs:active
21 * they idle (when we know the active requests are inactive) and allocate the
36 node_from_active(struct i915_active_fence *active)
38 return container_of(active, struct active_node, base);
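
node_from_active() above is the classic container_of() downcast: from a pointer to the embedded i915_active_fence member, recover the enclosing active_node. A minimal userspace sketch of the same trick, with sample_fence/sample_node as hypothetical stand-ins for the kernel types:

#include <stddef.h>
#include <stdio.h>

/* Same definition the kernel uses, minus its extra type checking. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sample_fence { int seqno; };

struct sample_node {
        struct sample_fence base;       /* embedded, like active_node.base */
        int timeline;
};

int main(void)
{
        struct sample_node node = { .base = { .seqno = 7 }, .timeline = 3 };
        struct sample_fence *fence = &node.base;

        /* Recover the node from the member pointer, as node_from_active() does. */
        struct sample_node *n = container_of(fence, struct sample_node, base);

        printf("timeline=%d\n", n->timeline);   /* prints timeline=3 */
        return 0;
}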
43 static inline bool is_barrier(const struct i915_active_fence *active)
45 return IS_ERR(rcu_access_pointer(active->fence));
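
is_barrier() works because a barrier proto-node parks an error pointer in the fence slot: the top MAX_ERRNO values of the address space are never valid pointers, so one RCU-protected slot can encode NULL (idle), a real fence, or a barrier sentinel. A hedged userspace sketch of the encoding (the particular errno is arbitrary here):

#include <errno.h>
#include <stdio.h>

/* Userspace copies of the kernel's ERR_PTR helpers. */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
        int dummy;
        void *fence = &dummy;                   /* an ordinary pointer */
        void *barrier = ERR_PTR(-EAGAIN);       /* a barrier sentinel */

        printf("fence is a barrier?   %d\n", IS_ERR(fence));    /* 0 */
        printf("barrier is a barrier? %d\n", IS_ERR(barrier));  /* 1 */
        return 0;
}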
79 return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
204 __active_fence_slot(struct i915_active_fence *active)
206 return (struct dma_fence ** __force)&active->fence;
212 struct i915_active_fence *active =
213 container_of(cb, typeof(*active), cb);
215 return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
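
The __force cast in __active_fence_slot() only strips the RCU annotation so the slot can be handed to cmpxchg(); active_fence_cb() then retires a fence by swapping the slot from that fence to NULL, and the comparison guarantees exactly one party wins even if the slot was concurrently repointed. A sketch with C11 atomics standing in for cmpxchg() (retire_cb is a hypothetical name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fence { int seqno; };

static _Atomic(struct fence *) slot;    /* the tracked-fence slot */

/* Clear the slot iff it still holds @expected; true if we won the race. */
static bool retire_cb(struct fence *expected)
{
        return atomic_compare_exchange_strong(&slot, &expected, NULL);
}

int main(void)
{
        struct fence f = { .seqno = 1 };

        atomic_store(&slot, &f);
        printf("callback retires: %d\n", retire_cb(&f));        /* 1 */
        printf("second attempt:   %d\n", retire_cb(&f));        /* 0: already NULL */
        return 0;
}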
269 /* While active, the tree can only be built; not destroyed */
339 int (*active)(struct i915_active *ref),
348 ref->active = active;
414 replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
416 if (!is_barrier(active)) /* proto-node used by our idle barrier? */
424 return __active_del_barrier(ref, node_from_active(active));
431 struct i915_active_fence *active;
440 active = active_instance(ref, idx);
441 if (!active) {
446 if (replace_barrier(ref, active)) {
447 RCU_INIT_POINTER(active->fence, NULL);
450 } while (unlikely(is_barrier(active)));
452 fence = __i915_active_fence_set(active, fence);
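
The do/while at lines 440-450 keeps re-fetching the per-timeline slot: if it currently holds an idle-barrier proto-node, replace_barrier() claims it and the fence pointer is cleared before the loop checks again, since another party may have installed a fresh barrier meanwhile. A single-threaded sketch of just that control flow, reusing the ERR_PTR sentinel from the sketch above (names hypothetical; the kernel also drops a reference when a barrier is claimed):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Claim an idle barrier out of the slot; 1 if there was one. */
static int replace_barrier(void **slotp)
{
        if (!IS_ERR(*slotp))
                return 0;       /* ordinary fence or empty: nothing to do */
        *slotp = NULL;          /* claimed; the driver also drops a ref */
        return 1;
}

int main(void)
{
        void *slot = ERR_PTR(-EAGAIN);  /* starts out holding a barrier */

        do {
                if (replace_barrier(&slot))
                        printf("claimed an idle barrier, rechecking\n");
        } while (IS_ERR(slot));

        printf("slot is free for the new fence\n");
        return 0;
}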
465 struct i915_active_fence *active,
470 if (replace_barrier(ref, active)) {
471 RCU_INIT_POINTER(active->fence, fence);
475 prev = __i915_active_fence_set(active, fence);
510 if (!ref->active) {
520 err = ref->active(ref);
532 struct i915_active_fence *active;
539 active = active_instance(ref, idx);
540 if (!active) {
545 return 0; /* return with active ref */
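
Lines 510-520 and 532-545 show the two halves of the acquire discipline: the optional ->active() hook fires only on the idle-to-busy transition (and a failure there must leave the count untouched), while i915_active_acquire_for_context() drops the reference it just took if the per-timeline slot cannot be allocated, so it returns holding exactly one reference or none at all. A simplified single-threaded sketch of both (hypothetical names; the kernel serialises the first acquire with a mutex around an atomic count):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct tracker {
        int count;
        int (*active)(struct tracker *t);       /* optional 0->1 hook */
};

static int tracker_acquire(struct tracker *t)
{
        if (t->count == 0 && t->active) {
                int err = t->active(t);
                if (err)
                        return err;     /* count still 0 on failure */
        }
        t->count++;
        return 0;
}

static void tracker_release(struct tracker *t)
{
        t->count--;
}

static bool slot_instance(struct tracker *t, unsigned long idx)
{
        return idx != 0;        /* stand-in for the node allocation */
}

static int tracker_acquire_for_context(struct tracker *t, unsigned long idx)
{
        int err = tracker_acquire(t);

        if (err)
                return err;

        if (!slot_instance(t, idx)) {
                tracker_release(t);     /* unwind on allocation failure */
                return -ENOMEM;
        }
        return 0;       /* return with active ref, as at line 545 */
}

static int sample_activate(struct tracker *t)
{
        printf("activate hook runs once, count was %d\n", t->count);
        return 0;
}

int main(void)
{
        struct tracker t = { .active = sample_activate };
        int err;

        err = tracker_acquire_for_context(&t, 1);
        printf("ok:   %d (count=%d)\n", err, t.count);

        err = tracker_acquire_for_context(&t, 0);
        printf("fail: %d (count=%d)\n", err, t.count);
        return 0;
}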
554 static void enable_signaling(struct i915_active_fence *active)
558 if (unlikely(is_barrier(active)))
561 fence = i915_active_fence_get(active);
620 * After the wait is complete, the caller may free the active.
627 static int __await_active(struct i915_active_fence *active,
633 if (is_barrier(active)) /* XXX flush the barrier? */
636 fence = i915_active_fence_get(active);
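
enable_signaling() and __await_active() share one shape: skip barrier sentinels, then promote the weak slot pointer into a strong reference via i915_active_fence_get() before touching the fence, bailing out if it has already been retired. A single-threaded sketch of that shape; note the kernel does the load-and-get under RCU because the slot can be cleared and the fence freed concurrently:

#include <stdatomic.h>
#include <stdio.h>

struct fence {
        atomic_int refcount;
        int seqno;
};

static _Atomic(struct fence *) slot;

/* Weak slot pointer -> strong reference, or NULL if nothing is tracked. */
static struct fence *fence_get(void)
{
        struct fence *f = atomic_load(&slot);

        if (!f)
                return NULL;
        atomic_fetch_add(&f->refcount, 1);      /* kernel: only safe under RCU */
        return f;
}

static void fence_put(struct fence *f)
{
        atomic_fetch_sub(&f->refcount, 1);      /* real code frees at zero */
}

static void use_fence(void)
{
        struct fence *f = fence_get();

        if (!f)
                return;         /* already retired: nothing to do */

        printf("operating on seqno %d\n", f->seqno);
        fence_put(f);
}

int main(void)
{
        struct fence f = { .refcount = 1, .seqno = 9 };

        use_fence();                    /* slot empty: no-op */
        atomic_store(&slot, &f);
        use_fence();                    /* prints: operating on seqno 9 */
        return 0;
}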
785 * i915_active, due to overlapping active phases there is likely a
1015 * __i915_active_fence_set: Update the last active fence along its timeline
1016 * @active: the active tracker
1019 * Records the new @fence as the last active fence along its timeline in
1020 * this active tracker, moving the tracking callbacks from the previous
1028 __i915_active_fence_set(struct i915_active_fence *active,
1039 * while tracked under a different active tracker. Combined with i915
1044 * As a countermeasure, we try to get a reference to the active->fence
1049 prev = i915_active_fence_get(active);
1057 * C already resident as the active->fence.
1076 * active->fence. Meanwhile, B follows the same path as A.
1078 * active->fence, locks it as soon as A completes, and possibly
1081 while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
1088 prev = i915_active_fence_get(active);
1109 __list_del_entry(&active->cb.node);
1112 list_add_tail(&active->cb.node, &fence->cb_list);
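
Stripped of the fence->lock juggling and callback-list splicing that the comments at lines 1039-1080 wrestle with, the heart of __i915_active_fence_set() is a cmpxchg() retry loop: publish the new fence in the slot and hand the previous occupant back to the caller, who must order the new fence after it. A minimal C11 sketch of only that loop (active_fence_set is a hypothetical stand-in; the real function re-takes prev under its lock on every iteration precisely because of the SLAB_TYPESAFE_BY_RCU reuse race described above):

#include <stdatomic.h>
#include <stdio.h>

struct fence { int seqno; };

/* Replace the tracked fence; return the previous occupant (may be NULL). */
static struct fence *active_fence_set(_Atomic(struct fence *) *slot,
                                      struct fence *fence)
{
        struct fence *prev = atomic_load(slot);

        /* On failure, compare_exchange reloads prev and we retry. */
        while (!atomic_compare_exchange_weak(slot, &prev, fence))
                ;

        return prev;    /* caller: order the new fence after prev */
}

int main(void)
{
        static _Atomic(struct fence *) slot;
        struct fence a = { .seqno = 1 }, b = { .seqno = 2 };
        struct fence *prev;

        prev = active_fence_set(&slot, &a);
        printf("prev=%p\n", (void *)prev);      /* (nil): slot was empty */

        prev = active_fence_set(&slot, &b);
        printf("prev seqno=%d\n", prev->seqno); /* 1: order b after a */
        return 0;
}

The exported i915_active_fence_set() below then makes the new request await whatever previous fence comes back, which is the "maintain timeline ordering" step its comment at line 1124 refers to.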
1118 int i915_active_fence_set(struct i915_active_fence *active,
1124 /* Must maintain timeline ordering wrt previous active requests */
1125 fence = __i915_active_fence_set(active, &rq->fence);