/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

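/*
 * Exercise register isolation between contexts: nonpriv (user-writable)
 * register state dirtied from one context must be neither visible to a
 * second context nor lost by the first across context switches, GPU
 * resets or system suspend.
 */
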
#include "igt.h"
#include "igt_dummyload.h"

#define MAX_REG 0x200000
#define NUM_REGS (MAX_REG / sizeof(uint32_t))

#define PAGE_ALIGN(x) ALIGN(x, 4096)

/* Test flags */
#define DIRTY1 0x1
#define DIRTY2 0x2
#define RESET 0x4

/* Pack an engine (class, instance) into a bit index, 4 bits per class */
#define ENGINE(x, y) BIT(4*(x) + (y))

enum {
	RCS0 = ENGINE(I915_ENGINE_CLASS_RENDER, 0),
	BCS0 = ENGINE(I915_ENGINE_CLASS_COPY, 0),
	VCS0 = ENGINE(I915_ENGINE_CLASS_VIDEO, 0),
	VCS1 = ENGINE(I915_ENGINE_CLASS_VIDEO, 1),
	VCS2 = ENGINE(I915_ENGINE_CLASS_VIDEO, 2),
	VCS3 = ENGINE(I915_ENGINE_CLASS_VIDEO, 3),
	VECS0 = ENGINE(I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),
};

#define ALL ~0u
#define GEN_RANGE(x, y) ((ALL >> (32 - ((y) - (x) + 1))) << (x))
#define GEN4 (ALL << 4)
#define GEN5 (ALL << 5)
#define GEN6 (ALL << 6)
#define GEN7 (ALL << 7)
#define GEN8 (ALL << 8)
#define GEN9 (ALL << 9)
#define GEN10 (ALL << 10)
#define GEN11 (ALL << 11)

#define NOCTX 0

#define LAST_KNOWN_GEN 11

static const struct named_register {
	const char *name;
	unsigned int gen_mask; /* on which gen the register exists */
	unsigned int engine_mask; /* preferred engine / powerwell */
	uint32_t offset; /* address of register, from bottom of mmio bar */
	uint32_t count;
	uint32_t ignore_bits;
	uint32_t write_mask; /* some register bits do not exist */
	bool masked;
} nonpriv_registers[] = {
73 { "NOPID", NOCTX, RCS0, 0x2094 },
74 { "MI_PREDICATE_RESULT_2", NOCTX, RCS0, 0x23bc },
75 {
76 "INSTPM",
77 GEN6, RCS0, 0x20c0,
78 .ignore_bits = BIT(8) /* ro counter */,
79 .write_mask = BIT(8) /* rsvd varies between gen */,
80 .masked = true,
81 },
82 { "IA_VERTICES_COUNT", GEN4, RCS0, 0x2310, 2 },
83 { "IA_PRIMITIVES_COUNT", GEN4, RCS0, 0x2318, 2 },
84 { "VS_INVOCATION_COUNT", GEN4, RCS0, 0x2320, 2 },
85 { "HS_INVOCATION_COUNT", GEN4, RCS0, 0x2300, 2 },
86 { "DS_INVOCATION_COUNT", GEN4, RCS0, 0x2308, 2 },
87 { "GS_INVOCATION_COUNT", GEN4, RCS0, 0x2328, 2 },
88 { "GS_PRIMITIVES_COUNT", GEN4, RCS0, 0x2330, 2 },
89 { "CL_INVOCATION_COUNT", GEN4, RCS0, 0x2338, 2 },
90 { "CL_PRIMITIVES_COUNT", GEN4, RCS0, 0x2340, 2 },
91 { "PS_INVOCATION_COUNT_0", GEN4, RCS0, 0x22c8, 2, .write_mask = ~0x3 },
92 { "PS_DEPTH_COUNT_0", GEN4, RCS0, 0x22d8, 2 },
93 { "GPUGPU_DISPATCHDIMX", GEN8, RCS0, 0x2500 },
94 { "GPUGPU_DISPATCHDIMY", GEN8, RCS0, 0x2504 },
95 { "GPUGPU_DISPATCHDIMZ", GEN8, RCS0, 0x2508 },
96 { "MI_PREDICATE_SRC0", GEN8, RCS0, 0x2400, 2 },
97 { "MI_PREDICATE_SRC1", GEN8, RCS0, 0x2408, 2 },
98 { "MI_PREDICATE_DATA", GEN8, RCS0, 0x2410, 2 },
99 { "MI_PRED_RESULT", GEN8, RCS0, 0x2418, .write_mask = 0x1 },
100 { "3DPRIM_END_OFFSET", GEN6, RCS0, 0x2420 },
101 { "3DPRIM_START_VERTEX", GEN6, RCS0, 0x2430 },
102 { "3DPRIM_VERTEX_COUNT", GEN6, RCS0, 0x2434 },
103 { "3DPRIM_INSTANCE_COUNT", GEN6, RCS0, 0x2438 },
104 { "3DPRIM_START_INSTANCE", GEN6, RCS0, 0x243c },
105 { "3DPRIM_BASE_VERTEX", GEN6, RCS0, 0x2440 },
106 { "GPGPU_THREADS_DISPATCHED", GEN8, RCS0, 0x2290, 2 },
107 { "PS_INVOCATION_COUNT_1", GEN8, RCS0, 0x22f0, 2, .write_mask = ~0x3 },
108 { "PS_DEPTH_COUNT_1", GEN8, RCS0, 0x22f8, 2 },
109 { "BB_OFFSET", GEN8, RCS0, 0x2158, .ignore_bits = 0x7 },
110 { "MI_PREDICATE_RESULT_1", GEN8, RCS0, 0x241c },
111 { "CS_GPR", GEN8, RCS0, 0x2600, 32 },
112 { "OA_CTX_CONTROL", GEN8, RCS0, 0x2360 },
113 { "OACTXID", GEN8, RCS0, 0x2364 },
114 { "PS_INVOCATION_COUNT_2", GEN8, RCS0, 0x2448, 2, .write_mask = ~0x3 },
115 { "PS_DEPTH_COUNT_2", GEN8, RCS0, 0x2450, 2 },
116 { "Cache_Mode_0", GEN7, RCS0, 0x7000, .masked = true },
117 { "Cache_Mode_1", GEN7, RCS0, 0x7004, .masked = true },
118 { "GT_MODE", GEN8, RCS0, 0x7008, .masked = true },
119 { "L3_Config", GEN8, RCS0, 0x7034 },
120 { "TD_CTL", GEN8, RCS0, 0xe400, .write_mask = 0xffff },
121 { "TD_CTL2", GEN8, RCS0, 0xe404 },
122 { "SO_NUM_PRIMS_WRITTEN0", GEN6, RCS0, 0x5200, 2 },
123 { "SO_NUM_PRIMS_WRITTEN1", GEN6, RCS0, 0x5208, 2 },
124 { "SO_NUM_PRIMS_WRITTEN2", GEN6, RCS0, 0x5210, 2 },
125 { "SO_NUM_PRIMS_WRITTEN3", GEN6, RCS0, 0x5218, 2 },
126 { "SO_PRIM_STORAGE_NEEDED0", GEN6, RCS0, 0x5240, 2 },
127 { "SO_PRIM_STORAGE_NEEDED1", GEN6, RCS0, 0x5248, 2 },
128 { "SO_PRIM_STORAGE_NEEDED2", GEN6, RCS0, 0x5250, 2 },
129 { "SO_PRIM_STORAGE_NEEDED3", GEN6, RCS0, 0x5258, 2 },
130 { "SO_WRITE_OFFSET0", GEN7, RCS0, 0x5280, .write_mask = ~0x3 },
131 { "SO_WRITE_OFFSET1", GEN7, RCS0, 0x5284, .write_mask = ~0x3 },
132 { "SO_WRITE_OFFSET2", GEN7, RCS0, 0x5288, .write_mask = ~0x3 },
133 { "SO_WRITE_OFFSET3", GEN7, RCS0, 0x528c, .write_mask = ~0x3 },
134 { "OA_CONTROL", NOCTX, RCS0, 0x2b00 },
135 { "PERF_CNT_1", NOCTX, RCS0, 0x91b8, 2 },
136 { "PERF_CNT_2", NOCTX, RCS0, 0x91c0, 2 },
137
138 { "CTX_PREEMPT", NOCTX /* GEN10 */, RCS0, 0x2248 },
139 { "CS_CHICKEN1", GEN11, RCS0, 0x2580, .masked = true },
140 { "HDC_CHICKEN1", GEN_RANGE(10, 10), RCS0, 0x7304, .masked = true },
141
142 /* Privileged (enabled by w/a + FORCE_TO_NONPRIV) */
143 { "CTX_PREEMPT", NOCTX /* GEN9 */, RCS0, 0x2248 },
144 { "CS_CHICKEN1", GEN_RANGE(9, 10), RCS0, 0x2580, .masked = true },
145 { "HDC_CHICKEN1", GEN_RANGE(9, 9), RCS0, 0x7304, .masked = true },
146 { "L3SQREG4", NOCTX /* GEN9:skl,kbl */, RCS0, 0xb118, .write_mask = ~0x1ffff0 },
147 { "HALF_SLICE_CHICKEN7", GEN_RANGE(11, 11), RCS0, 0xe194, .masked = true },
148 { "SAMPLER_MODE", GEN_RANGE(11, 11), RCS0, 0xe18c, .masked = true },
149
150 { "BCS_GPR", GEN9, BCS0, 0x22600, 32 },
151 { "BCS_SWCTRL", GEN8, BCS0, 0x22200, .write_mask = 0x3, .masked = true },
152
153 { "MFC_VDBOX1", NOCTX, VCS0, 0x12800, 64 },
154 { "MFC_VDBOX2", NOCTX, VCS1, 0x1c800, 64 },
155
156 { "VCS0_GPR", GEN_RANGE(9, 10), VCS0, 0x12600, 32 },
157 { "VCS1_GPR", GEN_RANGE(9, 10), VCS1, 0x1c600, 32 },
158 { "VECS_GPR", GEN_RANGE(9, 10), VECS0, 0x1a600, 32 },
159
160 { "VCS0_GPR", GEN11, VCS0, 0x1c0600, 32 },
161 { "VCS1_GPR", GEN11, VCS1, 0x1c4600, 32 },
162 { "VCS2_GPR", GEN11, VCS2, 0x1d0600, 32 },
163 { "VCS3_GPR", GEN11, VCS3, 0x1d4600, 32 },
164 { "VECS_GPR", GEN11, VECS0, 0x1c8600, 32 },
165
166 {}
167 }, ignore_registers[] = {
168 { "RCS timestamp", GEN6, ~0u, 0x2358 },
169 { "BCS timestamp", GEN7, ~0u, 0x22358 },
170
171 { "VCS0 timestamp", GEN_RANGE(7, 10), ~0u, 0x12358 },
172 { "VCS1 timestamp", GEN_RANGE(7, 10), ~0u, 0x1c358 },
173 { "VECS timestamp", GEN_RANGE(8, 10), ~0u, 0x1a358 },
174
175 { "VCS0 timestamp", GEN11, ~0u, 0x1c0358 },
176 { "VCS1 timestamp", GEN11, ~0u, 0x1c4358 },
177 { "VCS2 timestamp", GEN11, ~0u, 0x1d0358 },
178 { "VCS3 timestamp", GEN11, ~0u, 0x1d4358 },
179 { "VECS timestamp", GEN11, ~0u, 0x1c8358 },
180
181 {}
182 };
183
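/*
 * Map a register offset back to a readable name for error messages,
 * expanding multi-dword entries (count > 1) as "NAME[index]".
 */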
static const char *register_name(uint32_t offset, char *buf, size_t len)
{
	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
		unsigned int width = r->count ? 4*r->count : 4;
		if (offset >= r->offset && offset < r->offset + width) {
			if (r->count <= 1)
				return r->name;

			snprintf(buf, len, "%s[%d]",
				 r->name, (offset - r->offset)/4);
			return buf;
		}
	}

	return "unknown";
}

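/* Find the named_register entry covering @offset, or NULL if untracked. */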
static const struct named_register *lookup_register(uint32_t offset)
{
	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
		unsigned int width = r->count ? 4*r->count : 4;
		if (offset >= r->offset && offset < r->offset + width)
			return r;
	}

	return NULL;
}

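/* Should @offset be excluded from comparison (e.g. free-running timestamps)? */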
static bool ignore_register(uint32_t offset)
{
	for (const struct named_register *r = ignore_registers; r->name; r++) {
		unsigned int width = r->count ? 4*r->count : 4;
		if (offset >= r->offset && offset < r->offset + width)
			return true;
	}

	return false;
}

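/*
 * Build the expected-value template in @handle: for every register this
 * engine/gen exposes, store @value filtered through the same write_mask
 * and masked-register rules that write_regs() applies, so the template
 * can later be compared against a post-write readback.
 */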
static void tmpl_regs(int fd,
		      uint32_t ctx,
		      const struct intel_execution_engine2 *e,
		      uint32_t handle,
		      uint32_t value)
{
	const unsigned int gen_bit = 1 << intel_gen(intel_get_drm_devid(fd));
	const unsigned int engine_bit = ENGINE(e->class, e->instance);
	unsigned int regs_size;
	uint32_t *regs;

	regs_size = NUM_REGS * sizeof(uint32_t);
	regs_size = PAGE_ALIGN(regs_size);

	regs = gem_mmap__cpu(fd, handle, 0, regs_size, PROT_WRITE);
	gem_set_domain(fd, handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
		if (!(r->engine_mask & engine_bit))
			continue;
		if (!(r->gen_mask & gen_bit))
			continue;
		for (unsigned count = r->count ?: 1, offset = r->offset;
		     count--; offset += 4) {
			uint32_t x = value;
			if (r->write_mask)
				x &= r->write_mask;
			if (r->masked)
				x &= 0xffff;
			regs[offset/sizeof(*regs)] = x;
		}
	}
	munmap(regs, regs_size);
}

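/*
 * Snapshot every tracked register for this engine from within @ctx using
 * MI_STORE_REGISTER_MEM (SRM), and return the handle of the buffer
 * holding the captured values.
 */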
static uint32_t read_regs(int fd,
			  uint32_t ctx,
			  const struct intel_execution_engine2 *e,
			  unsigned int flags)
{
	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
	const unsigned int gen_bit = 1 << gen;
	const unsigned int engine_bit = ENGINE(e->class, e->instance);
	const bool r64b = gen >= 8;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_execbuffer2 execbuf;
	unsigned int regs_size, batch_size, n;
	uint32_t *batch, *b;

	reloc = calloc(NUM_REGS, sizeof(*reloc));
	igt_assert(reloc);

	regs_size = NUM_REGS * sizeof(uint32_t);
	regs_size = PAGE_ALIGN(regs_size);

	batch_size = NUM_REGS * 4 * sizeof(uint32_t) + 4;
	batch_size = PAGE_ALIGN(batch_size);

	memset(obj, 0, sizeof(obj));
	obj[0].handle = gem_create(fd, regs_size);
	obj[1].handle = gem_create(fd, batch_size);
	obj[1].relocs_ptr = to_user_pointer(reloc);

	b = batch = gem_mmap__cpu(fd, obj[1].handle, 0, batch_size, PROT_WRITE);
	gem_set_domain(fd, obj[1].handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	n = 0;
	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
		if (!(r->engine_mask & engine_bit))
			continue;
		if (!(r->gen_mask & gen_bit))
			continue;

		for (unsigned count = r->count ?: 1, offset = r->offset;
		     count--; offset += 4) {
			*b++ = 0x24 << 23 | (1 + r64b); /* SRM */
			*b++ = offset;
			reloc[n].target_handle = obj[0].handle;
			reloc[n].presumed_offset = 0;
			reloc[n].offset = (b - batch) * sizeof(*b);
			reloc[n].delta = offset;
			reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
			reloc[n].write_domain = I915_GEM_DOMAIN_RENDER;
			*b++ = offset;
			if (r64b)
				*b++ = 0;
			n++;
		}
	}

	obj[1].relocation_count = n;
	*b++ = MI_BATCH_BUFFER_END;
	munmap(batch, batch_size);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	execbuf.flags = e->flags;
	execbuf.rsvd1 = ctx;
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj[1].handle);
	free(reloc);

	return obj[0].handle;
}

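/*
 * From within @ctx, write @value (filtered through each register's
 * write_mask, with the write-enable bits set in the upper half of
 * masked registers) to every tracked register using
 * MI_LOAD_REGISTER_IMM (LRI).
 */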
static void write_regs(int fd,
		       uint32_t ctx,
		       const struct intel_execution_engine2 *e,
		       unsigned int flags,
		       uint32_t value)
{
	const unsigned int gen_bit = 1 << intel_gen(intel_get_drm_devid(fd));
	const unsigned int engine_bit = ENGINE(e->class, e->instance);
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;
	unsigned int batch_size;
	uint32_t *batch, *b;

	batch_size = NUM_REGS * 3 * sizeof(uint32_t) + 4;
	batch_size = PAGE_ALIGN(batch_size);

	memset(&obj, 0, sizeof(obj));
	obj.handle = gem_create(fd, batch_size);

	b = batch = gem_mmap__cpu(fd, obj.handle, 0, batch_size, PROT_WRITE);
	gem_set_domain(fd, obj.handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
		if (!(r->engine_mask & engine_bit))
			continue;
		if (!(r->gen_mask & gen_bit))
			continue;
		for (unsigned count = r->count ?: 1, offset = r->offset;
		     count--; offset += 4) {
			uint32_t x = value;
			if (r->write_mask)
				x &= r->write_mask;
			if (r->masked)
				x |= 0xffffu << 16;

			*b++ = 0x22 << 23 | 1; /* LRI */
			*b++ = offset;
			*b++ = x;
		}
	}
	*b++ = MI_BATCH_BUFFER_END;
	munmap(batch, batch_size);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(&obj);
	execbuf.buffer_count = 1;
	execbuf.flags = e->flags;
	execbuf.rsvd1 = ctx;
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj.handle);
}

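/*
 * Reload a previously captured snapshot (@regs, as returned by
 * read_regs()) back into the context using MI_LOAD_REGISTER_MEM (LRM).
 * Gen < 7 has no LRM, so the restore is silently skipped there.
 */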
static void restore_regs(int fd,
			 uint32_t ctx,
			 const struct intel_execution_engine2 *e,
			 unsigned int flags,
			 uint32_t regs)
{
	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
	const unsigned int gen_bit = 1 << gen;
	const unsigned int engine_bit = ENGINE(e->class, e->instance);
	const bool r64b = gen >= 8;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_relocation_entry *reloc;
	unsigned int batch_size, n;
	uint32_t *batch, *b;

	if (gen < 7) /* no LRM */
		return;

	reloc = calloc(NUM_REGS, sizeof(*reloc));
	igt_assert(reloc);

	batch_size = NUM_REGS * 3 * sizeof(uint32_t) + 4;
	batch_size = PAGE_ALIGN(batch_size);

	memset(obj, 0, sizeof(obj));
	obj[0].handle = regs;
	obj[1].handle = gem_create(fd, batch_size);
	obj[1].relocs_ptr = to_user_pointer(reloc);

	b = batch = gem_mmap__cpu(fd, obj[1].handle, 0, batch_size, PROT_WRITE);
	gem_set_domain(fd, obj[1].handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	n = 0;
	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
		if (!(r->engine_mask & engine_bit))
			continue;
		if (!(r->gen_mask & gen_bit))
			continue;

		for (unsigned count = r->count ?: 1, offset = r->offset;
		     count--; offset += 4) {
			*b++ = 0x29 << 23 | (1 + r64b); /* LRM */
			*b++ = offset;
			reloc[n].target_handle = obj[0].handle;
			reloc[n].presumed_offset = 0;
			reloc[n].offset = (b - batch) * sizeof(*b);
			reloc[n].delta = offset;
			reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
			reloc[n].write_domain = 0;
			*b++ = offset;
			if (r64b)
				*b++ = 0;
			n++;
		}
	}
	obj[1].relocation_count = n;
	*b++ = MI_BATCH_BUFFER_END;
	munmap(batch, batch_size);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	execbuf.flags = e->flags;
	execbuf.rsvd1 = ctx;
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj[1].handle);
	free(reloc); /* was leaked on every call */
}

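/* Debug helper: pretty-print a captured register snapshot. */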
__attribute__((unused))
static void dump_regs(int fd,
		      const struct intel_execution_engine2 *e,
		      unsigned int regs)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	const unsigned int gen_bit = 1 << gen;
	const unsigned int engine_bit = ENGINE(e->class, e->instance);
	unsigned int regs_size;
	uint32_t *out;

	regs_size = NUM_REGS * sizeof(uint32_t);
	regs_size = PAGE_ALIGN(regs_size);

	out = gem_mmap__cpu(fd, regs, 0, regs_size, PROT_READ);
	gem_set_domain(fd, regs, I915_GEM_DOMAIN_CPU, 0);

	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
		if (!(r->engine_mask & engine_bit))
			continue;
		if (!(r->gen_mask & gen_bit))
			continue;

		if (r->count <= 1) {
			igt_debug("0x%04x (%s): 0x%08x\n",
				  r->offset, r->name, out[r->offset/4]);
		} else {
			for (unsigned x = 0; x < r->count; x++)
				igt_debug("0x%04x (%s[%d]): 0x%08x\n",
					  r->offset+4*x, r->name, x,
					  out[r->offset/4 + x]);
		}
	}
	munmap(out, regs_size);
}

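/*
 * Compare two register snapshots dword by dword, skipping volatile
 * registers and masking off bits we cannot rely upon, and fail the
 * test if any tracked register differs.
 */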
static void compare_regs(int fd, uint32_t A, uint32_t B, const char *who)
{
	unsigned int num_errors;
	unsigned int regs_size;
	uint32_t *a, *b;
	char buf[80];

	regs_size = NUM_REGS * sizeof(uint32_t);
	regs_size = PAGE_ALIGN(regs_size);

	a = gem_mmap__cpu(fd, A, 0, regs_size, PROT_READ);
	gem_set_domain(fd, A, I915_GEM_DOMAIN_CPU, 0);

	b = gem_mmap__cpu(fd, B, 0, regs_size, PROT_READ);
	gem_set_domain(fd, B, I915_GEM_DOMAIN_CPU, 0);

	num_errors = 0;
	for (unsigned int n = 0; n < NUM_REGS; n++) {
		const struct named_register *r;
		uint32_t offset = n * sizeof(uint32_t);
		uint32_t mask;

		if (a[n] == b[n])
			continue;

		if (ignore_register(offset))
			continue;

		mask = ~0u;
		r = lookup_register(offset);
		if (r && r->masked)
			mask >>= 16;
		if (r && r->ignore_bits)
			mask &= ~r->ignore_bits;

		if ((a[n] & mask) == (b[n] & mask))
			continue;

		igt_warn("Register 0x%04x (%s): A=%08x B=%08x\n",
			 offset,
			 register_name(offset, buf, sizeof(buf)),
			 a[n] & mask, b[n] & mask);
		num_errors++;
	}
	munmap(b, regs_size);
	munmap(a, regs_size);

	igt_assert_f(num_errors == 0,
		     "%d registers mismatched between %s.\n",
		     num_errors, who);
}

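/*
 * Check that every nonpriv register we track is actually writable from
 * userspace: fill them with a test pattern via LRI, read them back with
 * SRM, and compare against the expected template.
 */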
static void nonpriv(int fd,
		    const struct intel_execution_engine2 *e,
		    unsigned int flags)
{
	static const uint32_t values[] = {
		0x0,
		0xffffffff,
		0xcccccccc,
		0x33333333,
		0x55555555,
		0xaaaaaaaa,
		0xf0f00f0f,
		0xa0a00303,
		0x0505c0c0,
		0xdeadbeef
	};
	unsigned int engine = e->flags;
	unsigned int num_values = ARRAY_SIZE(values);

	/* Sigh -- hsw: we need cmdparser access to our own registers! */
	igt_skip_on(intel_gen(intel_get_drm_devid(fd)) < 8);

	gem_quiescent_gpu(fd);

	for (int v = 0; v < num_values; v++) {
		igt_spin_t *spin = NULL;
		uint32_t ctx, regs[2], tmpl;

		ctx = gem_context_create(fd);
		tmpl = read_regs(fd, ctx, e, flags);
		regs[0] = read_regs(fd, ctx, e, flags);

		tmpl_regs(fd, ctx, e, tmpl, values[v]);

		spin = igt_spin_new(fd, .ctx = ctx, .engine = engine);

		igt_debug("%s[%d]: Setting all registers to 0x%08x\n",
			  __func__, v, values[v]);
		write_regs(fd, ctx, e, flags, values[v]);

		regs[1] = read_regs(fd, ctx, e, flags);

		/*
		 * Restore the original register values before the HW idles.
		 * Or else it may never restart!
		 */
		restore_regs(fd, ctx, e, flags, regs[0]);

		igt_spin_free(fd, spin);

		compare_regs(fd, tmpl, regs[1], "nonpriv read/writes");

		for (int n = 0; n < ARRAY_SIZE(regs); n++)
			gem_close(fd, regs[n]);
		gem_context_destroy(fd, ctx);
		gem_close(fd, tmpl);
	}
}

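/*
 * Check that writes to the nonpriv registers of one context are not
 * visible from a second, freshly created context, i.e. that register
 * state is saved and restored around each context switch.
 */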
static void isolation(int fd,
		      const struct intel_execution_engine2 *e,
		      unsigned int flags)
{
	static const uint32_t values[] = {
		0x0,
		0xffffffff,
		0xcccccccc,
		0x33333333,
		0x55555555,
		0xaaaaaaaa,
		0xdeadbeef
	};
	unsigned int engine = e->flags;
	unsigned int num_values =
		flags & (DIRTY1 | DIRTY2) ? ARRAY_SIZE(values) : 1;

	gem_quiescent_gpu(fd);

	for (int v = 0; v < num_values; v++) {
		igt_spin_t *spin = NULL;
		uint32_t ctx[2], regs[2], tmp;

		ctx[0] = gem_context_create(fd);
		regs[0] = read_regs(fd, ctx[0], e, flags);

		spin = igt_spin_new(fd, .ctx = ctx[0], .engine = engine);

		if (flags & DIRTY1) {
			igt_debug("%s[%d]: Setting all registers of ctx 0 to 0x%08x\n",
				  __func__, v, values[v]);
			write_regs(fd, ctx[0], e, flags, values[v]);
		}

		/*
		 * We create and execute a new context, whilst the HW is
		 * occupied with the previous context (we should switch from
		 * the old to the new proto-context without idling, which
		 * could then load the powercontext). If all goes well, we
		 * only see the default values from this context, but if it
		 * goes badly we see the corruption from the previous context
		 * instead!
		 */
		ctx[1] = gem_context_create(fd);
		regs[1] = read_regs(fd, ctx[1], e, flags);

		if (flags & DIRTY2) {
			igt_debug("%s[%d]: Setting all registers of ctx 1 to 0x%08x\n",
				  __func__, v, ~values[v]);
			write_regs(fd, ctx[1], e, flags, ~values[v]);
		}

		/*
		 * Restore the original register values before the HW idles.
		 * Or else it may never restart!
		 */
		tmp = read_regs(fd, ctx[0], e, flags);
		restore_regs(fd, ctx[0], e, flags, regs[0]);

		igt_spin_free(fd, spin);

		if (!(flags & DIRTY1))
			compare_regs(fd, regs[0], tmp, "two reads of the same ctx");
		compare_regs(fd, regs[0], regs[1], "two virgin contexts");

		for (int n = 0; n < ARRAY_SIZE(ctx); n++) {
			gem_close(fd, regs[n]);
			gem_context_destroy(fd, ctx[n]);
		}
		gem_close(fd, tmp);
	}
}

#define NOSLEEP (0 << 8)
#define S3_DEVICES (1 << 8)
#define S3 (2 << 8)
#define S4_DEVICES (3 << 8)
#define S4 (4 << 8)
#define SLEEP_MASK (0xf << 8)

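/*
 * Trigger a GPU reset while a throwaway context is executing, so the
 * reset lands on that context rather than on the contexts under test.
 */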
static void inject_reset_context(int fd, unsigned int engine)
{
	struct igt_spin_factory opts = {
		.ctx = gem_context_create(fd),
		.engine = engine,
		.flags = IGT_SPIN_FAST,
	};
	igt_spin_t *spin;

	/*
	 * Force a context switch before triggering the reset, or else
	 * we risk corrupting the target context and we can't blame the
	 * HW for screwing up if the context was already broken.
	 */

	if (gem_can_store_dword(fd, engine))
		opts.flags |= IGT_SPIN_POLL_RUN;

	spin = __igt_spin_factory(fd, &opts);

	if (igt_spin_has_poll(spin))
		igt_spin_busywait_until_started(spin);
	else
		usleep(1000); /* better than nothing */

	igt_force_gpu_reset(fd);

	igt_spin_free(fd, spin);
	gem_context_destroy(fd, opts.ctx);
}

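/*
 * Check that dirtied register state survives a hostile event (GPU reset
 * or system suspend/hibernate): dirty a set of contexts with distinct
 * values, inject the event, then verify each context still reads back
 * the values it wrote.
 */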
static void preservation(int fd,
			 const struct intel_execution_engine2 *e,
			 unsigned int flags)
{
	static const uint32_t values[] = {
		0x0,
		0xffffffff,
		0xcccccccc,
		0x33333333,
		0x55555555,
		0xaaaaaaaa,
		0xdeadbeef
	};
	const unsigned int num_values = ARRAY_SIZE(values);
	unsigned int engine = e->flags;
	uint32_t ctx[num_values + 1];
	uint32_t regs[num_values + 1][2];
	igt_spin_t *spin;

	gem_quiescent_gpu(fd);

	ctx[num_values] = gem_context_create(fd);
	spin = igt_spin_new(fd, .ctx = ctx[num_values], .engine = engine);
	regs[num_values][0] = read_regs(fd, ctx[num_values], e, flags);
	for (int v = 0; v < num_values; v++) {
		ctx[v] = gem_context_create(fd);
		write_regs(fd, ctx[v], e, flags, values[v]);

		regs[v][0] = read_regs(fd, ctx[v], e, flags);
	}
	gem_close(fd, read_regs(fd, ctx[num_values], e, flags));
	igt_spin_free(fd, spin);

	if (flags & RESET)
		inject_reset_context(fd, engine);

	switch (flags & SLEEP_MASK) {
	case NOSLEEP:
		break;

	case S3_DEVICES:
		igt_system_suspend_autoresume(SUSPEND_STATE_MEM,
					      SUSPEND_TEST_DEVICES);
		break;

	case S3:
		igt_system_suspend_autoresume(SUSPEND_STATE_MEM,
					      SUSPEND_TEST_NONE);
		break;

	case S4_DEVICES:
		igt_system_suspend_autoresume(SUSPEND_STATE_DISK,
					      SUSPEND_TEST_DEVICES);
		break;

	case S4:
		igt_system_suspend_autoresume(SUSPEND_STATE_DISK,
					      SUSPEND_TEST_NONE);
		break;
	}

	spin = igt_spin_new(fd, .ctx = ctx[num_values], .engine = engine);
	for (int v = 0; v < num_values; v++)
		regs[v][1] = read_regs(fd, ctx[v], e, flags);
	regs[num_values][1] = read_regs(fd, ctx[num_values], e, flags);
	igt_spin_free(fd, spin);

	for (int v = 0; v < num_values; v++) {
		char buf[80];

		snprintf(buf, sizeof(buf), "dirty %x context", values[v]);
		compare_regs(fd, regs[v][0], regs[v][1], buf);

		gem_close(fd, regs[v][0]);
		gem_close(fd, regs[v][1]);
		gem_context_destroy(fd, ctx[v]);
	}
	compare_regs(fd, regs[num_values][0], regs[num_values][1], "clean");
	gem_close(fd, regs[num_values][0]);
	gem_close(fd, regs[num_values][1]);
	gem_context_destroy(fd, ctx[num_values]);
}

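/*
 * Query which engine classes enforce register isolation between
 * contexts; I915_PARAM_HAS_CONTEXT_ISOLATION reports a bitmask of
 * engine classes.
 */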
static unsigned int __has_context_isolation(int fd)
{
	struct drm_i915_getparam gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = 50; /* I915_PARAM_HAS_CONTEXT_ISOLATION */
	gp.value = &value;

	igt_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	errno = 0;

	return value;
}

igt_main
{
	unsigned int has_context_isolation = 0;
	int fd = -1;

	igt_fixture {
		int gen;

		fd = drm_open_driver(DRIVER_INTEL);
		igt_require_gem(fd);
		igt_require(gem_has_contexts(fd));

		has_context_isolation = __has_context_isolation(fd);
		igt_require(has_context_isolation);

		gen = intel_gen(intel_get_drm_devid(fd));

		igt_warn_on_f(gen > LAST_KNOWN_GEN,
			      "GEN not recognized! Test needs to be updated to run.");
		igt_skip_on(gen > LAST_KNOWN_GEN);
	}

	for (const struct intel_execution_engine2 *e = intel_execution_engines2;
	     e->name; e++) {
		igt_subtest_group {
			igt_fixture {
				igt_require(has_context_isolation & (1 << e->class));
				gem_require_ring(fd, e->flags);
				igt_fork_hang_detector(fd);
			}

			igt_subtest_f("%s-nonpriv", e->name)
				nonpriv(fd, e, 0);

			igt_subtest_f("%s-clean", e->name)
				isolation(fd, e, 0);
			igt_subtest_f("%s-dirty-create", e->name)
				isolation(fd, e, DIRTY1);
			igt_subtest_f("%s-dirty-switch", e->name)
				isolation(fd, e, DIRTY2);

			igt_subtest_f("%s-none", e->name)
				preservation(fd, e, 0);
			igt_subtest_f("%s-S3", e->name)
				preservation(fd, e, S3);
			igt_subtest_f("%s-S4", e->name)
				preservation(fd, e, S4);

			igt_fixture {
				igt_stop_hang_detector();
			}

			igt_subtest_f("%s-reset", e->name) {
				igt_hang_t hang = igt_allow_hang(fd, 0, 0);
				preservation(fd, e, RESET);
				igt_disallow_hang(fd, hang);
			}
		}
	}
}