/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;
struct intel_gt;
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
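
/*
 * For reference: with 4-byte dwords this works out to 64 / sizeof(u32) = 16
 * dwords of data per cacheline.
 */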

#define ENGINE_TRACE(e, fmt, ...) do {					\
	const struct intel_engine_cs *e__ __maybe_unused = (e);		\
	GEM_TRACE("%s %s: " fmt,					\
		  dev_name(e__->i915->drm.dev), e__->name,		\
		  ##__VA_ARGS__);					\
} while (0)
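
/*
 * Illustrative sketch (not lifted from an actual caller): the macro prefixes
 * each message with the device and engine names, so callers supply only the
 * event itself, e.g.:
 *
 *	ENGINE_TRACE(engine, "reset requested, stalled? %s\n",
 *		     stalled ? "yes" : "no");
 */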

/*
 * The register defines to be used with the following macros need to accept a
 * base param, e.g.:
 *
 * REG_FOO(base) _MMIO((base) + <relative offset>)
 * ENGINE_READ(engine, REG_FOO);
 *
 * Register arrays are to be defined and accessed as follows:
 *
 * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
 * ENGINE_READ_IDX(engine, REG_BAR, i)
 */

#define __ENGINE_REG_OP(op__, engine__, ...) \
	intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define __ENGINE_READ_OP(op__, engine__, reg__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))

#define ENGINE_READ16(...)	__ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...)	__ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...)	__ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)

#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
	__ENGINE_REG_OP(read64_2x32, (engine__), \
			lower_reg__((engine__)->mmio_base), \
			upper_reg__((engine__)->mmio_base))

#define ENGINE_READ_IDX(engine__, reg__, idx__) \
	__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))

#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

#define ENGINE_WRITE16(...)	__ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...)	__ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...)	__ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
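
/*
 * Illustrative sketch: RING_HEAD()/RING_TAIL() in i915_reg.h follow the
 * base-parameter convention above, so typical ring bookkeeping might look
 * like (ring->tail standing in for whatever tail value the caller tracks):
 *
 *	u32 head = ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR;
 *	ENGINE_WRITE(engine, RING_TAIL, ring->tail);
 *	ENGINE_POSTING_READ(engine, RING_TAIL);
 */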

#define GEN6_RING_FAULT_REG_READ(engine__) \
	intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
	intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
({ \
	u32 __val; \
\
	__val = intel_uncore_read((engine__)->uncore, \
				  RING_FAULT_REG(engine__)); \
	__val &= ~(clear__); \
	__val |= (set__); \
	intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
			   __val); \
})
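
/*
 * Illustrative sketch: the RMW form reads, masks and writes back in a single
 * statement expression, e.g. to acknowledge a fault by clearing the valid
 * bit (RING_FAULT_VALID is defined in i915_reg.h):
 *
 *	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
 */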

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW
 * to do the writes, and that must have qw-aligned offsets, simply pretend it's
 * 8 bytes.
 */

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

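/*
 * Peek at the request currently being executed by the HW: re-read the active
 * pointer until two samples agree, seqlock style, so that a concurrent update
 * from process_csb() cannot hand back a stale port slot.
 */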
static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
	struct i915_request * const *cur, * const *old, *active;

	cur = READ_ONCE(execlists->active);
	smp_rmb(); /* pairs with overwrite protection in process_csb() */
	do {
		old = cur;

		active = READ_ONCE(*cur);
		cur = READ_ONCE(execlists->active);

		smp_rmb(); /* and complete the seqlock retry */
	} while (unlikely(cur != old));

	return active;
}

static inline void
execlists_active_lock_bh(struct intel_engine_execlists *execlists)
{
	local_bh_disable(); /* prevent local softirq and lock recursion */
	tasklet_lock(&execlists->tasklet);
}

static inline void
execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
{
	tasklet_unlock(&execlists->tasklet);
	local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
}
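
/*
 * Illustrative sketch: the bh lock pair brackets inspection of the execlists
 * state from process context, holding off the submission tasklet while we
 * look, e.g.:
 *
 *	execlists_active_lock_bh(&engine->execlists);
 *	rq = execlists_active(&engine->execlists);
 *	... inspect the in-flight request ...
 *	execlists_active_unlock_bh(&engine->execlists);
 */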

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a
	 * bit of extra paranoia to try and ensure that the HWS takes the
	 * value we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.addr[reg]);
		engine->status_page.addr[reg] = value;
		clflush(&engine->status_page.addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.addr[reg], value);
	}
}
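
/*
 * Illustrative sketch: both accessors take a dword index into the status
 * page, e.g. using the reserved offsets defined below:
 *
 *	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_SEQNO);
 *	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0);
 */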

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT		0x32
#define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_SEQNO		0x40
#define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH		0x80

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f
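
/*
 * The *_ADDR forms above are byte offsets derived from the dword indices,
 * e.g. I915_GEM_HWS_SEQNO_ADDR = 0x40 * sizeof(u32) = 0x100 bytes into the
 * status page, which is the form the GPU write commands expect.
 */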

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

int intel_engines_init_mmio(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);

void intel_engine_free_request_pool(struct intel_engine_cs *engine);

void intel_engines_release(struct intel_gt *gt);
void intel_engines_free(struct intel_gt *gt);

int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_engine_resume(struct intel_engine_cs *engine);

int intel_ring_submission_setup(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

void intel_engine_init_execlists(struct intel_engine_cs *engine);

static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
	batch[1] = flags1;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	return __gen8_emit_pipe_control(batch, 0, flags, offset);
}

static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
	return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
}
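
/*
 * Illustrative sketch: the emitters advance and return the command-stream
 * pointer, so they chain with intel_ring_begin()/intel_ring_advance() when
 * building a request, e.g.:
 *
 *	cs = intel_ring_begin(rq, 6);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
 *	intel_ring_advance(rq, cs);
 */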

static inline u32 *
__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
{
	*cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
	*cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
	*cs++ = offset;
	*cs++ = 0;
	*cs++ = value;
	*cs++ = 0; /* We're thrashing one extra dword. */

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	return __gen8_emit_write_rcs(cs,
				     value,
				     gtt_offset,
				     0,
				     flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
}

static inline u32 *
gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	return __gen8_emit_write_rcs(cs,
				     value,
				     gtt_offset,
				     flags0,
				     flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
}

static inline u32 *
__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	*cs++ = (MI_FLUSH_DW + 1) | flags;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	return __gen8_emit_flush_dw(cs,
				    value,
				    gtt_offset | MI_FLUSH_DW_USE_GTT,
				    flags | MI_FLUSH_DW_OP_STOREDW);
}
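
/*
 * Illustrative sketch: a fini breadcrumb might use this helper to write the
 * request's seqno into its timeline slot in the HWSP through the GGTT:
 *
 *	cs = gen8_emit_ggtt_write(cs,
 *				  rq->fence.seqno,
 *				  i915_request_active_timeline(rq)->hwsp_offset,
 *				  0);
 */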

/*
 * Ask the backend to rewind its state past any incomplete requests (via the
 * reset.rewind hook, if one is provided) and bump engine->serial to record
 * that contexts were lost in the reset.
 */
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
					bool stalled)
{
	if (engine->reset.rewind)
		engine->reset.rewind(engine, stalled);
	engine->serial++; /* contexts lost */
}

bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
void intel_engine_flush_submission(struct intel_engine_cs *engine);

void intel_engines_reset_default_submission(struct intel_gt *gt);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
				   ktime_t *now);

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);

u32 intel_engine_context_size(struct intel_gt *gt, u8 class);

void intel_engine_init_active(struct intel_engine_cs *engine,
			      unsigned int subclass);
#define ENGINE_PHYSICAL	0
#define ENGINE_MOCK	1
#define ENGINE_VIRTUAL	2

static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
		return false;

	return intel_engine_has_preemption(engine);
}

static inline bool
intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
{
	if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
		return false;

	return READ_ONCE(engine->props.heartbeat_interval_ms);
}

#endif /* _INTEL_RINGBUFFER_H_ */