Lines Matching defs:active
22 * they idle (when we know the active requests are inactive) and allocate the
40 node_from_active(struct i915_active_fence *active)
42 return container_of(active, struct active_node, base);
47 static inline bool is_barrier(const struct i915_active_fence *active)
49 return IS_ERR(rcu_access_pointer(active->fence));
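is_barrier() at lines 47-49 works because an idle barrier is not a real fence: it is parked in the tracker's fence slot as an error-encoded pointer, so a single IS_ERR() test on the slot distinguishes a barrier proto-node from live tracking. Below is a minimal standalone sketch of that pointer-tagging trick, with ERR_PTR()/IS_ERR() re-derived from their kernel definitions; struct slot and the chosen errno are illustrative, not the driver's.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }

    /* Error codes occupy the top MAX_ERRNO values of the address space,
     * where no valid pointer can live, so the test cannot collide with
     * a genuinely tracked fence.
     */
    static inline bool IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct slot { void *fence; };  /* stand-in for i915_active_fence */

    int main(void)
    {
            struct slot tracked = { .fence = &tracked };     /* real pointer */
            struct slot barrier = { .fence = ERR_PTR(-11) }; /* barrier marker */

            /* Only the barrier slot reads back as an error pointer. */
            printf("tracked: %d, barrier: %d\n",
                   IS_ERR(tracked.fence), IS_ERR(barrier.fence));
            return 0;
    }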
83 return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
209 __active_fence_slot(struct i915_active_fence *active)
211 return (struct dma_fence ** __force)&active->fence;
217 struct i915_active_fence *active =
218 container_of(cb, typeof(*active), cb);
220 return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
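active_fence_cb() at lines 217-220 is the retirement half of the tracking protocol: when the fence signals, the callback clears the slot with a lockless cmpxchg that succeeds only if the completed fence is still the one being tracked, so a racing __i915_active_fence_set() that has already installed a newer fence is left undisturbed. A compilable userspace model of that idempotent-retire pattern using C11 atomics (all names are stand-ins):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct fence;                                     /* opaque stand-in */
    struct tracker { _Atomic(struct fence *) fence; };

    /* Clear the slot iff it still holds @completed; if a newer fence was
     * installed meanwhile, fail and leave the new tracking alone.
     */
    static bool tracker_retire(struct tracker *t, struct fence *completed)
    {
            struct fence *expected = completed;

            return atomic_compare_exchange_strong(&t->fence, &expected, NULL);
    }

    int main(void)
    {
            int token;
            struct fence *f = (struct fence *)&token;  /* any non-NULL token */
            struct tracker t = { f };

            /* First retire succeeds; a second, stale callback is a no-op. */
            return tracker_retire(&t, f) && !tracker_retire(&t, f) ? 0 : 1;
    }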
276 /* While active, the tree can only be built; not destroyed */
346 int (*active)(struct i915_active *ref),
356 ref->active = active;
424 replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
426 if (!is_barrier(active)) /* proto-node used by our idle barrier? */
434 return __active_del_barrier(ref, node_from_active(active));
439 struct i915_active_fence *active;
448 active = active_instance(ref, idx);
449 if (!active) {
454 if (replace_barrier(ref, active)) {
455 RCU_INIT_POINTER(active->fence, NULL);
458 } while (unlikely(is_barrier(active)));
460 fence = __i915_active_fence_set(active, fence);
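The do/while at lines 448-458 consumes the barrier encoding from above: if active_instance() hands back a slot still occupied by an idle barrier, replace_barrier() tries to tear the barrier down so the slot can be reset to NULL, and the loop retries because __active_del_barrier() can lose the race against another thread claiming that barrier, leaving the error-pointer marker in place. An outline of that control flow with the helpers left as stubs; every name here is illustrative, only the driver functions cited in the comments are real.

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct slot { void *fence; };

    /* Stubs standing in for active_instance(), is_barrier(),
     * replace_barrier() and __i915_active_fence_set(); the signatures
     * are illustrative, not the driver's.
     */
    struct slot *lookup_slot(void *ref, unsigned long long idx);
    bool slot_is_barrier(const struct slot *s);
    bool claim_idle_barrier(void *ref, struct slot *s);
    void install_fence(struct slot *s, void *fence);

    static int track_fence(void *ref, unsigned long long idx, void *fence)
    {
            struct slot *s;

            do {
                    s = lookup_slot(ref, idx);
                    if (!s)
                            return -ENOMEM;

                    /* Reclaim the slot from a parked idle barrier; on
                     * success the barrier marker is replaced with NULL.
                     */
                    if (claim_idle_barrier(ref, s))
                            s->fence = NULL;
            } while (slot_is_barrier(s));  /* lost the claim race? retry */

            install_fence(s, fence);
            return 0;
    }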
473 struct i915_active_fence *active,
478 if (replace_barrier(ref, active)) {
479 RCU_INIT_POINTER(active->fence, fence);
483 prev = __i915_active_fence_set(active, fence);
509 /* Only valid while active, see i915_active_acquire_for_context() */
541 if (!ref->active) {
551 err = ref->active(ref);
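Lines 541 and 551 are the lazy-activation path of i915_active_acquire(): the optional ref->active() hook must fire exactly once per idle-to-busy transition, so the slow path takes a mutex and re-checks that no concurrent acquirer already made the tracker busy before calling it. A userspace model of that once-per-transition pattern, assuming C11 atomics and a pthread mutex (names are stand-ins):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct active {
            atomic_int count;
            pthread_mutex_t mutex;
            int (*activate)(struct active *);  /* optional hook, may be NULL */
    };

    /* Take a reference only if already busy (count > 0); the kernel's
     * equivalent is atomic_add_unless(&ref->count, 1, 0).
     */
    static bool acquire_if_busy(struct active *ref)
    {
            int old = atomic_load(&ref->count);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(&ref->count, &old, old + 1))
                            return true;
            }
            return false;
    }

    static int acquire(struct active *ref)
    {
            int err = 0;

            if (acquire_if_busy(ref))
                    return 0;

            if (!ref->activate) {             /* no hook: plain refcount */
                    atomic_fetch_add(&ref->count, 1);
                    return 0;
            }

            /* Serialise the 0 -> 1 transition so the hook runs exactly once. */
            pthread_mutex_lock(&ref->mutex);
            if (!acquire_if_busy(ref)) {      /* still idle: we activate */
                    err = ref->activate(ref);
                    if (!err)
                            atomic_fetch_add(&ref->count, 1);
            }
            pthread_mutex_unlock(&ref->mutex);
            return err;
    }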
563 struct i915_active_fence *active;
570 active = active_instance(ref, idx);
571 if (!active) {
576 return 0; /* return with active ref */
585 static void enable_signaling(struct i915_active_fence *active)
589 if (unlikely(is_barrier(active)))
592 fence = i915_active_fence_get(active);
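enable_signaling() at lines 585-592 shows the recurring read-side pattern in this file: skip barrier markers, take a strong reference to the still-tracked fence with i915_active_fence_get(), act on that snapshot, then drop it. __await_active() at lines 658-667 below has the same shape, awaiting the snapshot instead of enabling signaling. A simplified model of snapshot-then-act; the real i915_active_fence_get() uses rcu_read_lock() and dma_fence_get_rcu() to make the get safe against concurrent frees, which this sketch ignores.

    #include <stdatomic.h>
    #include <stddef.h>

    struct fence { atomic_int refcount; };
    struct tracker { _Atomic(struct fence *) fence; };

    /* Snapshot the slot and take a reference, returning NULL if the fence
     * has already signaled and retired out of the tracker.
     */
    static struct fence *tracker_get(struct tracker *t)
    {
            struct fence *f = atomic_load(&t->fence);

            if (f)
                    atomic_fetch_add(&f->refcount, 1);
            return f;
    }

    static void fence_put(struct fence *f)
    {
            atomic_fetch_sub(&f->refcount, 1);
    }

    /* The shape of enable_signaling()/__await_active(): snapshot, act, drop. */
    static void poke(struct tracker *t, void (*act)(struct fence *))
    {
            struct fence *f = tracker_get(t);

            if (!f)
                    return;      /* already signaled and retired */
            act(f);
            fence_put(f);
    }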
651 * After the wait is complete, the caller may free the active.
658 static int __await_active(struct i915_active_fence *active,
664 if (is_barrier(active)) /* XXX flush the barrier? */
667 fence = i915_active_fence_get(active);
816 * i915_active, due to overlapping active phases there is likely a
1046 * __i915_active_fence_set: Update the last active fence along its timeline
1047 * @active: the active tracker
1050 * Records the new @fence as the last active fence along its timeline in
1051 * this active tracker, moving the tracking callbacks from the previous
1059 __i915_active_fence_set(struct i915_active_fence *active,
1070 * while tracked under a different active tracker. Combined with i915
1075 * As a countermeasure, we try to get a reference to the active->fence
1080 prev = i915_active_fence_get(active);
1088 * C already resident as the active->fence.
1107 * active->fence. Meanwhile, B follows the same path as A.
1109 * active->fence, locks it as soon as A completes, and possibly
1112 while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
1119 prev = i915_active_fence_get(active);
1140 __list_del_entry(&active->cb.node);
1143 list_add_tail(&active->cb.node, &fence->cb_list);
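The loop at line 1112 is the heart of __i915_active_fence_set(): publish the new fence with cmpxchg, and if the slot changed underneath (a racing setter, or active_fence_cb() clearing it to NULL), line 1119 re-snapshots the previous fence and the loop retries; lines 1140-1143 then migrate the single callback node from the displaced fence's cb_list to the new fence's. The A/B/C commentary around lines 1070-1119 explains why prev must be reference-counted before its lock can be taken. A userspace model of just the publication loop, using the compare-exchange's failure path as the re-snapshot; the reference counting, fence locking and callback migration are elided.

    #include <stdatomic.h>
    #include <stddef.h>

    struct fence;
    struct tracker { _Atomic(struct fence *) fence; };

    /* Install @fence as the latest fence on the timeline, returning the
     * fence it displaced (NULL if the tracker was idle) so the caller can
     * order the new work behind it. On CAS failure, C11 writes the fresh
     * slot value back into @prev, which is exactly the retry-with-new-
     * snapshot behaviour of the kernel loop.
     */
    static struct fence *set_latest(struct tracker *t, struct fence *fence)
    {
            struct fence *prev = atomic_load(&t->fence);

            while (!atomic_compare_exchange_weak(&t->fence, &prev, fence))
                    ;  /* prev now holds the fresh snapshot; just retry */

            return prev;
    }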
1149 int i915_active_fence_set(struct i915_active_fence *active,
1155 /* Must maintain timeline ordering wrt previous active requests */
1156 fence = __i915_active_fence_set(active, &rq->fence);
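The wrapper at lines 1149-1156 shows the caller-side contract: __i915_active_fence_set() returns the fence it displaced, and keeping the timeline ordered means the new request must await that previous fence before it may execute. A usage sketch reusing the set_latest()/fence_put() model above; await_fence() is hypothetical, standing in for whatever makes new work wait on old.

    #include <stddef.h>

    struct fence;
    struct tracker;

    /* From the set_latest() sketch above, plus a hypothetical await helper. */
    struct fence *set_latest(struct tracker *t, struct fence *fence);
    void fence_put(struct fence *f);
    int await_fence(struct fence *waiter, struct fence *waitee);

    static int track_request(struct tracker *t, struct fence *rq_fence)
    {
            struct fence *prev = set_latest(t, rq_fence);
            int err = 0;

            if (prev) {
                    err = await_fence(rq_fence, prev);  /* order behind prev */
                    fence_put(prev);                    /* drop the snapshot */
            }
            return err;
    }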