/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/radix-tree.h>

#include "i915_gem.h"
#include "i915_scheduler.h"

struct pid;

struct drm_device;
struct drm_file;

struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
struct i915_request;
struct i915_vma;
struct intel_ring;

#define DEFAULT_CONTEXT_HANDLE 0

struct intel_context;

struct intel_context_ops {
	void (*unpin)(struct intel_context *ce);
	void (*destroy)(struct intel_context *ce);
};

/**
 * struct i915_gem_context - client state
 *
 * The struct i915_gem_context represents the combined view of the driver and
 * logical hardware state for a particular client.
 */
struct i915_gem_context {
	/** i915: i915 device backpointer */
	struct drm_i915_private *i915;

	/** file_priv: owning file descriptor */
	struct drm_i915_file_private *file_priv;

	/**
	 * @ppgtt: unique address space (GTT)
	 *
	 * In full-ppgtt mode, each context has its own address space, ensuring
	 * complete separation of one client from all others.
	 *
	 * In other modes, this is a NULL pointer with the expectation that
	 * the caller uses the shared global GTT.
	 */
	struct i915_hw_ppgtt *ppgtt;

	/**
	 * @pid: process id of creator
	 *
	 * Note that who created the context may not be the principal user,
	 * as the context may be shared across a local socket. However,
	 * that should only affect the default context; all contexts created
	 * explicitly by the client are expected to be isolated.
	 */
	struct pid *pid;

	/**
	 * @name: arbitrary name
	 *
	 * A name is constructed for the context from the creator's process
	 * name, pid and user handle in order to uniquely identify the
	 * context in messages.
	 */
	const char *name;

	/** link: place within &drm_i915_private.context_list */
	struct list_head link;
	/** free_link: link into &drm_i915_private.contexts.free_list when freed */
	struct llist_node free_link;
	/**
	 * @ref: reference count
	 *
	 * A reference to a context is held by both the client who created it
	 * and on each request submitted to the hardware using the request
	 * (to ensure the hardware has access to the state until it has
	 * finished all pending writes). See i915_gem_context_get() and
	 * i915_gem_context_put() for access.
	 */
	struct kref ref;

	/**
	 * @rcu: rcu_head for deferred freeing.
	 */
	struct rcu_head rcu;

	/**
	 * @flags: small set of booleans
	 */
	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	1
#define CONTEXT_CLOSED			2
#define CONTEXT_BANNABLE		3
#define CONTEXT_BANNED			4
#define CONTEXT_FORCE_SINGLE_SUBMISSION	5
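
	/*
	 * Beware the mixed convention here: CONTEXT_NO_ZEROMAP is a mask
	 * tested directly against @flags, while the remaining values are
	 * bit numbers passed to test_bit()/__set_bit() by the
	 * i915_gem_context_*() helpers below. A rough sketch of the two
	 * styles (limit_mappable() is purely illustrative, not a real
	 * helper):
	 *
	 *	if (ctx->flags & CONTEXT_NO_ZEROMAP)
	 *		limit_mappable(eb);
	 *	if (test_bit(CONTEXT_BANNED, &ctx->flags))
	 *		return -EIO;
	 */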

	/**
	 * @hw_id: unique identifier for the context
	 *
	 * The hardware needs to uniquely identify the context for a few
	 * functions like fault reporting, PASID, scheduling. The
	 * &drm_i915_private.context_hw_ida is used to assign a unique
	 * id for the lifetime of the context.
	 */
	unsigned int hw_id;

	/**
	 * @user_handle: userspace identifier
	 *
	 * A unique per-file identifier is generated from
	 * &drm_i915_file_private.contexts.
	 */
	u32 user_handle;

	struct i915_sched_attr sched;

	/** ggtt_offset_bias: placement restriction for context objects */
	u32 ggtt_offset_bias;

	/** engine: per-engine logical HW state */
	struct intel_context {
		struct i915_gem_context *gem_context;
		struct i915_vma *state;
		struct intel_ring *ring;
		u32 *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;

		const struct intel_context_ops *ops;
	} __engine[I915_NUM_ENGINES];

	/** ring_size: size for allocating the per-engine ring buffer */
	u32 ring_size;
	/** desc_template: invariant fields for the HW context descriptor */
	u32 desc_template;

	/** guilty_count: How many times this context has caused a GPU hang. */
	atomic_t guilty_count;
	/**
	 * @active_count: How many times this context was active during a GPU
	 * hang, but did not cause it.
	 */
	atomic_t active_count;

#define CONTEXT_SCORE_GUILTY		10
#define CONTEXT_SCORE_BAN_THRESHOLD	40
	/** ban_score: Accumulated score of all hangs caused by this context. */
	atomic_t ban_score;

	/** remap_slice: Bitmask of L3 slices that need remapping */
	u8 remap_slice;

	/** jump_whitelist: Bit array for tracking cmds during cmdparsing */
	unsigned long *jump_whitelist;

	/** jump_whitelist_cmds: Number of cmd slots available in the bit array */
	u32 jump_whitelist_cmds;

	/**
	 * @handles_vma: rbtree to look up our context specific obj/vma for
	 * the user handle. (user handles are per fd, but the binding is
	 * per vm, which may be one per context or shared with the global GTT)
	 */
	struct radix_tree_root handles_vma;

	/**
	 * @handles_list: reverse list of all the rbtree entries in use for
	 * this context, which allows us to free all the allocations on
	 * context close.
	 */
	struct list_head handles_list;
};
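
/*
 * The @handles_vma radix tree is keyed by the userspace object handle, so a
 * per-context handle-to-vma lookup is a plain radix tree walk. A minimal
 * sketch of the pattern (the real lookup lives in the execbuffer code and
 * takes the required locks; the slow path shown is elided):
 *
 *	struct i915_vma *vma;
 *
 *	vma = radix_tree_lookup(&ctx->handles_vma, handle);
 *	if (!vma)
 *		vma = ...;	// slow path: look up the object and bind it
 */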

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	__set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
	return !ctx->file_priv;
}

static inline struct intel_context *
to_intel_context(struct i915_gem_context *ctx,
		 const struct intel_engine_cs *engine)
{
	return &ctx->__engine[engine->id];
}

static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	return engine->context_pin(engine, ctx);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->pin_count);
	ce->pin_count++;
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->pin_count);
	if (--ce->pin_count)
		return;

	GEM_BUG_ON(!ce->ops);
	ce->ops->unpin(ce);
}
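
/*
 * Pinning keeps the per-engine state resident while requests are being
 * built. A rough sketch of the expected pattern (error handling trimmed;
 * in practice this runs under struct_mutex inside the request machinery):
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_pin(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *	...				// emit commands into ce->ring
 *	__intel_context_pin(ce);	// take an extra nested pin, if needed
 *	...
 *	intel_context_unpin(ce);	// drop the nested pin
 *	intel_context_unpin(ce);	// final unpin calls ce->ops->unpin()
 */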

/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);

void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}
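
/*
 * As documented at @ref above, anything that needs the context state beyond
 * the current critical section should take its own reference. A minimal
 * sketch of the pattern (the surrounding locking is the caller's problem):
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_get(src);	// state stays alive from here
 *	...					// submit requests, inspect ctx
 *	i915_gem_context_put(ctx);		// may trigger deferred freeing
 */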

#endif /* !__I915_GEM_CONTEXT_H__ */