/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"

#define CE_TRACE(ce, fmt, ...) do {				\
	const struct intel_context *ce__ = (ce);		\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,		\
		     ce__->timeline->fence_context,		\
		     ##__VA_ARGS__);				\
} while (0)
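
/*
 * Example (illustrative only): emit a trace line tagged with this
 * context's fence id, e.g. from a submission path. The "prio" argument
 * is hypothetical; any printf-style arguments matching the format
 * string may follow:
 *
 *	CE_TRACE(ce, "schedule-in (prio %d)\n", prio);
 */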

struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
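
/*
 * Typical usage sketch (error paths abbreviated): stabilise the pinned
 * status before inspecting state that is only valid while pinned:
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce))
 *		... inspect the pinned HW state ...
 *
 *	intel_context_unlock_pinned(ce);
 */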

int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);
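
/*
 * Minimal pin/unpin sketch: intel_context_pin() takes a fast atomic
 * reference when the context is already pinned and otherwise falls back
 * to the slow path in __intel_context_do_pin(). Every successful pin
 * must be balanced by intel_context_unpin():
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *	... submit work on the context ...
 *	intel_context_unpin(ce);
 */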

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
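
/*
 * Sketch of the active-count protocol (the caller must already hold
 * ce->timeline->mutex, as asserted above): the first enter invokes
 * ce->ops->enter() to power up the context, and the matching last exit
 * invokes ce->ops->exit() to allow it to be parked again:
 *
 *	intel_context_enter(ce);
 *	... build and submit requests ...
 *	intel_context_exit(ce);
 */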

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
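
/*
 * Reference counting sketch: take a reference before publishing the
 * context to another owner and drop it when done; the final put invokes
 * ce->ops->destroy():
 *
 *	struct intel_context *ref = intel_context_get(ce);
 *	...
 *	intel_context_put(ref);
 */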

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}
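
/*
 * Usage sketch: the lock helper returns the locked timeline on success
 * or an ERR_PTR() if the wait was interrupted, so check with IS_ERR()
 * before use:
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	...
 *	intel_context_timeline_unlock(tl);
 */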

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
	return u64_to_ptr(struct intel_ring, sz);
}
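
/*
 * Until the context state is allocated, ce->ring holds the requested
 * ring size encoded as a fake pointer. A caller might ask for a larger
 * ring before pinning with, for example (SZ_16K is illustrative):
 *
 *	ce->ring = __intel_context_ring_size(SZ_16K);
 */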

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}
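
/*
 * Note that intel_context_set_banned() returns the previous flag value
 * (test_and_set_bit() semantics), so a caller can tell whether it was
 * the first to ban the context (sketch):
 *
 *	if (!intel_context_set_banned(ce))
 *		... first to ban: e.g. cancel outstanding requests ...
 */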

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
	const u32 period =
		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;

	return READ_ONCE(ce->runtime.total) * period;
}

static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
	const u32 period =
		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;

	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}
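
/*
 * Both helpers convert the context's CS timestamp counts into
 * nanoseconds using the platform's timestamp period. An illustrative
 * consumer, e.g. when exporting busyness statistics:
 *
 *	u64 total = intel_context_get_total_runtime_ns(ce);
 *	u64 avg = intel_context_get_avg_runtime_ns(ce);
 */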

#endif /* __INTEL_CONTEXT_H__ */