/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE
DECLARE_EWMA(runtime, 3, 8);
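/*
 * DECLARE_EWMA(runtime, 3, 8) expands, via <linux/average.h>, into a
 * struct ewma_runtime together with the inline helpers ewma_runtime_init(),
 * ewma_runtime_add() and ewma_runtime_read(): 3 fractional bits of
 * precision, with each new sample weighted by 1/8.  Illustrative use only
 * (a sketch, not part of this header):
 *
 *	struct ewma_runtime avg;
 *
 *	ewma_runtime_init(&avg);
 *	ewma_runtime_add(&avg, sample_ticks);
 *	smoothed = ewma_runtime_read(&avg);
 */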

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*ban)(struct intel_context *ce, struct i915_request *rq);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};
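/*
 * A loose sketch of how the common intel_context code might drive the
 * (un)pinning hooks above (illustrative only; the authoritative flow
 * lives in intel_context.c):
 *
 *	err = ce->ops->pre_pin(ce, ww, &vaddr);
 *	if (err == 0)
 *		err = ce->ops->pin(ce, vaddr);
 *	...
 *	ce->ops->unpin(ce);
 *	ce->ops->post_unpin(ce);
 */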

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
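	/*
	 * The low bits of @inflight carry a small submission count packed
	 * next to the engine pointer (ptr_mask_bits()/ptr_unmask_bits() from
	 * i915_utils.h).  Illustrative use only (not part of this header):
	 *
	 *	struct intel_engine_cs *engine = intel_context_inflight(ce);
	 *	unsigned int count = intel_context_inflight_count(ce);
	 */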

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state; /* backing vma for the HW context image */
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8
#define CONTEXT_LRCA_DIRTY		9
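	/*
	 * These flags are manipulated with the usual bitops helpers; the
	 * driver normally wraps them in small intel_context_*() accessors.
	 * Illustrative use only (not part of this header):
	 *
	 *	if (test_bit(CONTEXT_BANNED, &ce->flags))
	 *		return -EIO;
	 *	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
	 */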

	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state; /* register state within the mapped context image */
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/* Time on GPU as tracked by the hw. */
	struct {
		struct ewma_runtime avg;
		u64 total;
		u32 last;
		I915_SELFTEST_DECLARE(u32 num_underflow);
		I915_SELFTEST_DECLARE(u32 max_underflow);
	} runtime;
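	/*
	 * A rough sketch of how the runtime fields above are typically
	 * updated from a 32-bit hardware timestamp sample (illustrative
	 * only, not the driver's actual code):
	 *
	 *	dt = (s32)(sample - ce->runtime.last);
	 *	ce->runtime.last = sample;
	 *	ce->runtime.total += dt;
	 *	ewma_runtime_add(&ce->runtime.avg, dt);
	 */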

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpu activity */

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	/**
	 * pinned_contexts_link: List link for the engine's pinned contexts.
	 * This is only used if this is a perma-pinned kernel context, and
	 * the list is assumed to be manipulated only during driver load or
	 * unload, so no mutex protection is currently needed.
	 */
	struct list_head pinned_contexts_link;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u16 sched_state;
		/*
		 * fences: maintains a list of requests that have a submit
		 * fence related to GuC submission
		 */
		struct list_head fences;
	} guc_state;

	struct {
		/** lock: protects everything in guc_active */
		spinlock_t lock;
		/** requests: active requests on this context */
		struct list_head requests;
	} guc_active;
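	/*
	 * A loose sketch of how guc_active is typically walked (illustrative
	 * only; the request link name is an assumption about the GuC
	 * backend, not something declared in this header):
	 *
	 *	spin_lock_irqsave(&ce->guc_active.lock, flags);
	 *	list_for_each_entry(rq, &ce->guc_active.requests, sched.link)
	 *		...;
	 *	spin_unlock_irqrestore(&ce->guc_active.lock, flags);
	 */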

	/* GuC scheduling state flags that do not require a lock. */
	atomic_t guc_sched_state_no_lock;

	/* GuC LRC descriptor ID */
	u16 guc_id;

	/* GuC LRC descriptor reference count */
	atomic_t guc_id_ref;

	/*
	 * GuC ID link - in list when unpinned but guc_id still valid in GuC
	 */
	struct list_head guc_id_link;

	/* GuC context blocked fence */
	struct i915_sw_fence guc_blocked;

	/*
	 * GuC priority management
	 */
	u8 guc_prio;
	u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
};
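/*
 * A loose sketch of the reference/pin life cycle the fields above imply
 * (illustrative only; the real entry points are intel_context_create(),
 * intel_context_pin()/unpin() and intel_context_get()/put() in
 * intel_context.[ch]):
 *
 *	ce = intel_context_create(engine);
 *	err = intel_context_pin(ce);    (takes pin_mutex, calls ops->pin)
 *	... build and submit requests ...
 *	intel_context_unpin(ce);
 *	intel_context_put(ce);          (final kref_put invokes ops->destroy)
 */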

#endif /* __INTEL_CONTEXT_TYPES__ */