/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef _I915_ACTIVE_H_
#define _I915_ACTIVE_H_

#include <linux/lockdep.h>

#include "i915_active_types.h"
#include "i915_request.h"

struct i915_request;
struct intel_engine_cs;
struct intel_timeline;
/*
 * We treat requests as fences. This is not to be confused with our
 * "fence registers", but rather pipeline synchronisation objects a la
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU, for example, we should not rewrite an object's PTEs
 * whilst the GPU is reading them. We also track fences at a higher level to
 * provide implicit synchronisation around GEM objects, e.g. set-domain will
 * wait for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_active_fence to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_active_fence is updated with i915_active_fence_set() to
 * track the most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_active_fence completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_active_fence.fence == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
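
/*
 * For example (a minimal sketch; the structure below is hypothetical and
 * not part of this header), an object wanting independent read/write
 * serialisation would embed one tracker per access mode:
 *
 *	struct hypothetical_obj {
 *		struct i915_active_fence last_read;
 *		struct i915_active_fence last_write;
 *	};
 *
 * Writes would then update both trackers, reads only last_read, and each
 * tracker's retirement callback fires as its tracked fence is retired.
 */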

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);

/**
 * __i915_active_fence_init - prepares the activity tracker for use
 * @active - the active tracker
 * @fence - initial fence to track, can be NULL
 * @fn - a callback invoked when the tracker is retired (becomes idle),
 *	 can be NULL
 *
 * __i915_active_fence_init() prepares the embedded @active struct for use
 * as an activity tracker, that is for tracking the last known active fence
 * associated with it. When the tracked fence becomes idle (is retired after
 * completion), the optional callback @fn is invoked.
 */
static inline void
__i915_active_fence_init(struct i915_active_fence *active,
			 void *fence,
			 dma_fence_func_t fn)
{
	RCU_INIT_POINTER(active->fence, fence);
	active->cb.func = fn ?: i915_active_noop;
}

#define INIT_ACTIVE_FENCE(A) \
	__i915_active_fence_init((A), NULL, NULL)

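/*
 * A minimal usage sketch (hypothetical_retire and obj are assumptions for
 * illustration, not part of this header): pass a dma_fence callback to be
 * notified on retirement, or use INIT_ACTIVE_FENCE() when no callback is
 * needed:
 *
 *	static void hypothetical_retire(struct dma_fence *fence,
 *					struct dma_fence_cb *cb)
 *	{
 *		... the tracker is now idle; release associated resources ...
 *	}
 *
 *	__i915_active_fence_init(&obj->last_write, NULL, hypothetical_retire);
 *	INIT_ACTIVE_FENCE(&obj->last_read);
 */
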
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence);

/**
 * i915_active_fence_set - updates the tracker to watch the current fence
 * @active - the active tracker
 * @rq - the request to watch
 *
 * i915_active_fence_set() watches the given @rq for completion. While
 * that @rq is busy, the @active reports busy. When that @rq is signaled
 * (or else retired), the @active tracker is updated to report idle.
 */
int __must_check
i915_active_fence_set(struct i915_active_fence *active,
		      struct i915_request *rq);
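
/*
 * For example (a sketch; obj->last_write is a hypothetical tracker and the
 * surrounding locking is elided):
 *
 *	err = i915_active_fence_set(&obj->last_write, rq);
 *	if (err)
 *		return err;
 *
 * The return value must be checked (the function is __must_check), as
 * ordering the new request after the previously tracked fence can fail.
 */
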
/**
 * i915_active_fence_get - return a reference to the active fence
 * @active - the active tracker
 *
 * i915_active_fence_get() returns a reference to the active fence,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with dma_fence_put().
 */
static inline struct dma_fence *
i915_active_fence_get(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&active->fence);
	rcu_read_unlock();

	return fence;
}

/**
 * i915_active_fence_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_active_fence_isset() returns true if the active tracker is currently
 * assigned to a fence. Due to the lazy retiring, that fence may be idle
 * and this may report stale information.
 */
static inline bool
i915_active_fence_isset(const struct i915_active_fence *active)
{
	return rcu_access_pointer(active->fence);
}

/*
 * GPU activity tracking
 *
 * Each set of commands submitted to the GPU comprises a single request that
 * signals a fence upon completion. struct i915_request combines the
 * command submission, scheduling and fence signaling roles. If we want to see
 * if a particular task is complete, we need to grab the fence (struct
 * i915_request) for that task and check or wait for it to be signaled. More
 * often though we want to track the status of a bunch of tasks, for example
 * to wait for the GPU to finish accessing some memory across a variety of
 * different command pipelines from different clients. We could choose to
 * track every single request associated with the task, but knowing that
 * each request belongs to an ordered timeline (later requests within a
 * timeline must wait for earlier requests), we need only track the
 * latest request in each timeline to determine the overall status of the
 * task.
 *
 * struct i915_active provides this tracking across timelines. It builds a
 * composite shared-fence, and is updated as new work is submitted to the task,
 * forming a snapshot of the current status. It should be embedded into the
 * different resources that need to track their associated GPU activity to
 * provide a callback when that GPU activity has ceased, or otherwise to
 * provide a serialisation point either for request submission or for CPU
 * synchronisation.
 */

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey);

/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
#define i915_active_init(ref, active, retire) do {			\
	static struct lock_class_key __mkey;				\
	static struct lock_class_key __wkey;				\
									\
	__i915_active_init(ref, active, retire, &__mkey, &__wkey);	\
} while (0)
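
/*
 * A minimal embedding sketch (hypothetical_resource and its callbacks are
 * assumptions for illustration, not part of this header):
 *
 *	struct hypothetical_resource {
 *		struct i915_active active;
 *	};
 *
 *	static int hypothetical_active(struct i915_active *ref)
 *	{
 *		... called as the tracker first becomes active ...
 *		return 0;
 *	}
 *
 *	static void hypothetical_retire(struct i915_active *ref)
 *	{
 *		... called once all tracked activity has completed ...
 *	}
 *
 *	i915_active_init(&res->active, hypothetical_active,
 *			 hypothetical_retire);
 */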

struct dma_fence *
__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence);
int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence);

static inline int
i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
	return i915_active_ref(ref,
			       i915_request_timeline(rq)->fence_context,
			       &rq->fence);
}
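
/*
 * Typical flow when submitting new work against a tracked resource (a
 * sketch; res is hypothetical, and the tracker must be held active across
 * the update, see i915_active_acquire()/i915_active_release() below):
 *
 *	err = i915_active_acquire(&res->active);
 *	if (err)
 *		return err;
 *
 *	err = i915_active_add_request(&res->active, rq);
 *
 *	i915_active_release(&res->active);
 *	return err;
 */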

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);

static inline bool i915_active_has_exclusive(struct i915_active *ref)
{
	return rcu_access_pointer(ref->excl.fence);
}

int __i915_active_wait(struct i915_active *ref, int state);
static inline int i915_active_wait(struct i915_active *ref)
{
	return __i915_active_wait(ref, TASK_INTERRUPTIBLE);
}
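
/*
 * For CPU synchronisation, e.g. (a sketch; res is hypothetical):
 *
 *	err = i915_active_wait(&res->active);
 *	if (err)
 *		return err;
 *
 * This sleeps (interruptibly) until the tracker reports idle, i.e. until
 * all tracked fences have been signaled and retired.
 */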

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags);
int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags);
#define I915_ACTIVE_AWAIT_EXCL BIT(0)
#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
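
/*
 * For request-side serialisation, a new request can be ordered after the
 * tracked activity, e.g. (a sketch; res is hypothetical):
 *
 *	err = i915_request_await_active(rq, &res->active,
 *					I915_ACTIVE_AWAIT_EXCL |
 *					I915_ACTIVE_AWAIT_ACTIVE);
 *
 * with the flags selecting whether to wait upon the exclusive fence, the
 * full set of shared fences, and/or any barriers.
 */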

int i915_active_acquire(struct i915_active *ref);
int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
bool i915_active_acquire_if_busy(struct i915_active *ref);

void i915_active_release(struct i915_active *ref);

static inline void __i915_active_acquire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	atomic_inc(&ref->count);
}

static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
	return !atomic_read(&ref->count);
}

void i915_active_fini(struct i915_active *ref);

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine);
void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);
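
/*
 * Barriers keep the tracker busy until a later request on the given engine
 * has executed. A rough sketch of the pairing (res and the engine lookup
 * are hypothetical; error handling elided):
 *
 *	err = i915_active_acquire_preallocate_barrier(&res->active, engine);
 *	if (err)
 *		return err;
 *	i915_active_acquire_barrier(&res->active);
 */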

void i915_active_print(struct i915_active *ref, struct drm_printer *m);
void i915_active_unlock_wait(struct i915_active *ref);

struct i915_active *i915_active_create(void);
struct i915_active *i915_active_get(struct i915_active *ref);
void i915_active_put(struct i915_active *ref);

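/*
 * Order @rq after the exclusive fence tracked by @active, if any, taking
 * and dropping a reference to that fence under RCU.
 */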
static inline int __i915_request_await_exclusive(struct i915_request *rq,
						 struct i915_active *active)
{
	struct dma_fence *fence;
	int err = 0;

	fence = i915_active_fence_get(&active->excl);
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

#endif /* _I915_ACTIVE_H_ */