1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 /** @file gem_mocs_settings.c
26 *
27 * Check that the MOCs cache settings are valid.
28 */
29
30 #include "igt.h"
31 #include "igt_gt.h"
32 #include "igt_perf.h"
33 #include "igt_sysfs.h"
34
#define GEN9_NUM_MOCS_ENTRIES	62  /* 62 out of 64 - 63 & 64 are reserved. */
#define GEN11_NUM_MOCS_ENTRIES	64  /* 63-64 are reserved, but configured. */

/* Power/reset transition to apply between the two MOCS verification passes. */
enum {
	NONE,
	RESET,
	RC6,
	SUSPEND,
	HIBERNATE,
	MAX_MOCS_TEST_MODES
};

/* Subtest-name fragment for each test mode above. */
static const char * const test_modes[] = {
	[NONE] = "settings",
	[RESET] = "reset",
	[RC6] = "rc6",
	[SUSPEND] = "suspend",
	[HIBERNATE] = "hibernate"
};

/* Subtest variation flags: run on a non-default context and/or dirty the
 * MOCS registers from a second context first.
 */
#define MOCS_NON_DEFAULT_CTX	(1<<0)
#define MOCS_DIRTY_VALUES	(1<<1)
#define ALL_MOCS_FLAGS	(MOCS_NON_DEFAULT_CTX | \
			 MOCS_DIRTY_VALUES)

/* MMIO offsets of the per-engine MOCS register banks (gen9-style) and the
 * single global table used from gen12 onwards.
 */
#define GEN9_LNCFCMOCS0		(0xB020)	/* L3 Cache Control base */
#define GEN9_GFX_MOCS_0		(0xc800)	/* Graphics MOCS base register*/
#define GEN9_MFX0_MOCS_0	(0xc900)	/* Media 0 MOCS base register*/
#define GEN9_MFX1_MOCS_0	(0xcA00)	/* Media 1 MOCS base register*/
#define GEN9_VEBOX_MOCS_0	(0xcB00)	/* Video MOCS base register*/
#define GEN9_BLT_MOCS_0		(0xcc00)	/* Blitter MOCS base register*/
#define GEN12_GLOBAL_MOCS	(0x4000)
/* "Page table entry" MOCS setting, i.e. inherit caching from the PTE. */
#define ICELAKE_MOCS_PTE	{0x00000004, 0x0030, 0x1}
#define MOCS_PTE		{0x00000038, 0x0030, 0x1}

/* One expected MOCS table entry: the full control register value, the 16-bit
 * half of the packed LNCFCMOCS (l3cc) register belonging to this index, and
 * whether the entry should be checked at all.
 */
struct mocs_entry {
	uint32_t control_value;
	uint16_t l3cc_value;
	uint8_t used;
};

/* An expected-values table together with its entry count. */
struct mocs_table {
	uint32_t size;
	const struct mocs_entry *table;
};
80
/* The first entries in the MOCS tables are defined by uABI */

/* Expected MOCS programming per platform.  Unlisted indices have .used == 0
 * and are skipped by the checkers.  Values mirror the kernel's tables —
 * verify against drivers/gpu/drm/i915 when updating.
 */
static const struct mocs_entry tigerlake_mocs_table[GEN11_NUM_MOCS_ENTRIES] = {
	[2] = { 0x00000037, 0x0030, 0x1},
	[3] = { 0x00000005, 0x0010, 0x1},
	[4] = { 0x00000005, 0x0030, 0x1},
	[5] = { 0x00000037, 0x0010, 0x1},
	[6] = { 0x00000017, 0x0010, 0x1},
	[7] = { 0x00000017, 0x0030, 0x1},
	[8] = { 0x00000027, 0x0010, 0x1},
	[9] = { 0x00000027, 0x0030, 0x1},
	[10] = { 0x00000077, 0x0010, 0x1},
	[11] = { 0x00000077, 0x0030, 0x1},
	[12] = { 0x00000057, 0x0010, 0x1},
	[13] = { 0x00000057, 0x0030, 0x1},
	[14] = { 0x00000067, 0x0010, 0x1},
	[15] = { 0x00000067, 0x0030, 0x1},
	[16] = { 0x00004005, 0x0010, 0x1},
	[17] = { 0x00004005, 0x0030, 0x1},
	[18] = { 0x00060037, 0x0030, 0x1},
	[19] = { 0x00000737, 0x0030, 0x1},
	[20] = { 0x00000337, 0x0030, 0x1},
	[21] = { 0x00000137, 0x0030, 0x1},
	[22] = { 0x000003b7, 0x0030, 0x1},
	[23] = { 0x000007b7, 0x0030, 0x1},
	[48] = { 0x00000037, 0x0030, 0x1},
	[49] = { 0x00000005, 0x0030, 0x1},
	[50] = { 0x00000037, 0x0010, 0x1},
	[51] = { 0x00000005, 0x0010, 0x1},
	[60] = { 0x00000037, 0x0010, 0x1},
	[61] = { 0x00004005, 0x0030, 0x1},
	[62] = { 0x00000037, 0x0010, 0x1},
	[63] = { 0x00000037, 0x0010, 0x1},
};

static const struct mocs_entry icelake_mocs_table[GEN11_NUM_MOCS_ENTRIES] = {
	[0] = { 0x00000005, 0x0010, 0x1},
	[1] = ICELAKE_MOCS_PTE,
	[2] = { 0x00000037, 0x0030, 0x1},
	[3] = { 0x00000005, 0x0010, 0x1},
	[4] = { 0x00000005, 0x0030, 0x1},
	[5] = { 0x00000037, 0x0010, 0x1},
	[6] = { 0x00000017, 0x0010, 0x1},
	[7] = { 0x00000017, 0x0030, 0x1},
	[8] = { 0x00000027, 0x0010, 0x1},
	[9] = { 0x00000027, 0x0030, 0x1},
	[10] = { 0x00000077, 0x0010, 0x1},
	[11] = { 0x00000077, 0x0030, 0x1},
	[12] = { 0x00000057, 0x0010, 0x1},
	[13] = { 0x00000057, 0x0030, 0x1},
	[14] = { 0x00000067, 0x0010, 0x1},
	[15] = { 0x00000067, 0x0030, 0x1},
	[18] = { 0x00060037, 0x0030, 0x1},
	[19] = { 0x00000737, 0x0030, 0x1},
	[20] = { 0x00000337, 0x0030, 0x1},
	[21] = { 0x00000137, 0x0030, 0x1},
	[22] = { 0x000003b7, 0x0030, 0x1},
	[23] = { 0x000007b7, 0x0030, 0x1},
	[62] = { 0x00000037, 0x0010, 0x1},
	[63] = { 0x00000037, 0x0010, 0x1},
};

/* Gen9: only the first three entries are fixed; the rest default to PTE.
 * The "[a ... b]" range designator is a GNU extension, used throughout IGT.
 */
static const struct mocs_entry skylake_mocs_table[GEN9_NUM_MOCS_ENTRIES] = {
	[0] = { 0x00000009, 0x0010, 0x1},
	[1] = MOCS_PTE,
	[2] = { 0x0000003b, 0x0030, 0x1},
	[3 ... GEN9_NUM_MOCS_ENTRIES - 1] = MOCS_PTE,
};

/* Expected readback after a privileged all-ones write (usable bits only). */
static const struct mocs_entry dirty_skylake_mocs_table[GEN9_NUM_MOCS_ENTRIES] = {
	[0 ... GEN9_NUM_MOCS_ENTRIES - 1] = { 0x00003FFF, 0x003F, 0x1 },
};

static const struct mocs_entry broxton_mocs_table[GEN9_NUM_MOCS_ENTRIES] = {
	[0] = { 0x00000009, 0x0010, 0x1},
	[1] = MOCS_PTE,
	[2] = { 0x00000039, 0x0030, 0x1},
	[3 ... GEN9_NUM_MOCS_ENTRIES - 1] = MOCS_PTE,
};

static const struct mocs_entry dirty_broxton_mocs_table[GEN9_NUM_MOCS_ENTRIES] = {
	[0 ... GEN9_NUM_MOCS_ENTRIES - 1] = { 0x00007FFF, 0x003F, 0x1 },
};

/* All-ones pattern written when dirtying the MOCS registers. */
static const uint32_t write_values[GEN9_NUM_MOCS_ENTRIES] = {
	[0 ... GEN9_NUM_MOCS_ENTRIES - 1] = 0xFFFFFFFF,
};
168
has_global_mocs(int fd)169 static bool has_global_mocs(int fd)
170 {
171 return intel_gen(intel_get_drm_devid(fd)) >= 12;
172 }
173
/* Look up the expected MOCS table for the device behind @fd.
 *
 * @dirty selects the post-dirtying expectations where a platform has them
 * (gen9 only; gen11+ tables ignore @dirty).  Returns false for platforms
 * with no table, leaving @table untouched.
 */
static bool get_mocs_settings(int fd, struct mocs_table *table, bool dirty)
{
	const uint32_t devid = intel_get_drm_devid(fd);

	if (IS_SKYLAKE(devid) || IS_KABYLAKE(devid) || IS_COMETLAKE(devid)) {
		if (dirty) {
			table->table = dirty_skylake_mocs_table;
			table->size = ARRAY_SIZE(dirty_skylake_mocs_table);
		} else {
			table->table = skylake_mocs_table;
			table->size = ARRAY_SIZE(skylake_mocs_table);
		}
		return true;
	}

	if (IS_BROXTON(devid)) {
		if (dirty) {
			table->table = dirty_broxton_mocs_table;
			table->size = ARRAY_SIZE(dirty_broxton_mocs_table);
		} else {
			table->table = broxton_mocs_table;
			table->size = ARRAY_SIZE(broxton_mocs_table);
		}
		return true;
	}

	if (IS_ICELAKE(devid)) {
		table->table = icelake_mocs_table;
		table->size = ARRAY_SIZE(icelake_mocs_table);
		return true;
	}

	if (IS_TIGERLAKE(devid)) {
		table->table = tigerlake_mocs_table;
		table->size = ARRAY_SIZE(tigerlake_mocs_table);
		return true;
	}

	return false;
}
209
210 #define LOCAL_I915_EXEC_BSD1 (I915_EXEC_BSD | (1<<13))
211 #define LOCAL_I915_EXEC_BSD2 (I915_EXEC_BSD | (2<<13))
212
get_engine_base(int fd,uint32_t engine)213 static uint32_t get_engine_base(int fd, uint32_t engine)
214 {
215 if (has_global_mocs(fd))
216 return GEN12_GLOBAL_MOCS;
217
218 switch (engine) {
219 case LOCAL_I915_EXEC_BSD1: return GEN9_MFX0_MOCS_0;
220 case LOCAL_I915_EXEC_BSD2: return GEN9_MFX1_MOCS_0;
221 case I915_EXEC_RENDER: return GEN9_GFX_MOCS_0;
222 case I915_EXEC_BLT: return GEN9_BLT_MOCS_0;
223 case I915_EXEC_VEBOX: return GEN9_VEBOX_MOCS_0;
224 default: return 0;
225 }
226 }
227
228 #define MI_STORE_REGISTER_MEM_64_BIT_ADDR ((0x24 << 23) | 2)
229
create_read_batch(struct drm_i915_gem_relocation_entry * reloc,uint32_t * batch,uint32_t dst_handle,uint32_t size,uint32_t reg_base)230 static int create_read_batch(struct drm_i915_gem_relocation_entry *reloc,
231 uint32_t *batch,
232 uint32_t dst_handle,
233 uint32_t size,
234 uint32_t reg_base)
235 {
236 unsigned int offset = 0;
237
238 for (uint32_t index = 0; index < size; index++, offset += 4) {
239 batch[offset] = MI_STORE_REGISTER_MEM_64_BIT_ADDR;
240 batch[offset+1] = reg_base + (index * sizeof(uint32_t));
241 batch[offset+2] = index * sizeof(uint32_t); /* reloc */
242 batch[offset+3] = 0;
243
244 reloc[index].offset = (offset + 2) * sizeof(uint32_t);
245 reloc[index].delta = index * sizeof(uint32_t);
246 reloc[index].target_handle = dst_handle;
247 reloc[index].write_domain = I915_GEM_DOMAIN_RENDER;
248 reloc[index].read_domains = I915_GEM_DOMAIN_RENDER;
249 }
250
251 batch[offset++] = MI_BATCH_BUFFER_END;
252 batch[offset++] = 0;
253
254 return offset * sizeof(uint32_t);
255 }
256
/* Execute a privileged batch on @engine_id in context @ctx_id that dumps
 * @size registers from @reg_base into @dst_handle for later inspection.
 */
static void do_read_registers(int fd,
			      uint32_t ctx_id,
			      uint32_t dst_handle,
			      uint32_t reg_base,
			      uint32_t size,
			      uint32_t engine_id)
{
	struct drm_i915_gem_relocation_entry reloc[size];
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t batch[size * 4 + 4];
	uint32_t bb = gem_create(fd, 4096);

	memset(&execbuf, 0, sizeof(execbuf));
	memset(obj, 0, sizeof(obj));
	memset(reloc, 0, sizeof(reloc));

	obj[0].handle = dst_handle;
	obj[1].handle = bb;
	obj[1].relocs_ptr = to_user_pointer(reloc);
	obj[1].relocation_count = size;

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = ARRAY_SIZE(obj);
	execbuf.batch_len =
		create_read_batch(reloc, batch, dst_handle, size, reg_base);
	/* SECURE: SRM of privileged registers requires a privileged batch */
	execbuf.flags = I915_EXEC_SECURE | engine_id;
	i915_execbuffer2_set_context_id(execbuf, ctx_id);

	gem_write(fd, bb, 0, batch, execbuf.batch_len);
	gem_execbuf(fd, &execbuf);
	gem_close(fd, bb);
}
291
292 #define LOCAL_MI_LOAD_REGISTER_IMM (0x22 << 23)
293
create_write_batch(uint32_t * batch,const uint32_t * values,uint32_t size,uint32_t reg_base)294 static int create_write_batch(uint32_t *batch,
295 const uint32_t *values,
296 uint32_t size,
297 uint32_t reg_base)
298 {
299 unsigned int i;
300 unsigned int offset = 0;
301
302 batch[offset++] = LOCAL_MI_LOAD_REGISTER_IMM | (size * 2 - 1);
303
304 for (i = 0; i < size; i++) {
305 batch[offset++] = reg_base + (i * 4);
306 batch[offset++] = values[i];
307 }
308
309 batch[offset++] = MI_BATCH_BUFFER_END;
310
311 return offset * sizeof(uint32_t);
312 }
313
/* Write @values to @size consecutive registers at @reg_base via an LRI batch
 * on @engine_id / @ctx_id.  With @privileged the batch runs SECURE, so the
 * writes take effect; without it, writes to privileged registers should be
 * dropped by the hardware (exercised by the isolation test).
 */
static void write_registers(int fd,
			    uint32_t ctx_id,
			    uint32_t reg_base,
			    const uint32_t *values,
			    uint32_t size,
			    uint32_t engine_id,
			    bool privileged)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj;
	uint32_t batch[size * 4 + 2];
	uint32_t bb = gem_create(fd, 4096);

	memset(&execbuf, 0, sizeof(execbuf));
	memset(&obj, 0, sizeof(obj));

	obj.handle = bb;

	execbuf.buffers_ptr = to_user_pointer(&obj);
	execbuf.buffer_count = 1;
	execbuf.batch_len = create_write_batch(batch, values, size, reg_base);
	execbuf.flags = privileged ? I915_EXEC_SECURE | engine_id : engine_id;
	i915_execbuffer2_set_context_id(execbuf, ctx_id);

	gem_write(fd, bb, 0, batch, execbuf.batch_len);
	gem_execbuf(fd, &execbuf);
	gem_close(fd, bb);
}
345
/* Read back the engine's MOCS control registers through the GPU and compare
 * every .used entry against the expected table for this device.
 */
static void check_control_registers(int fd,
				    unsigned engine,
				    uint32_t ctx_id,
				    bool dirty)
{
	const uint32_t reg_base = get_engine_base(fd, engine);
	uint32_t dst = gem_create(fd, 4096);
	struct mocs_table table;
	uint32_t *vaddr;

	igt_assert(get_mocs_settings(fd, &table, dirty));

	do_read_registers(fd, ctx_id, dst, reg_base, table.size, engine);

	vaddr = gem_mmap__cpu(fd, dst, 0, 4096, PROT_READ);
	gem_set_domain(fd, dst, I915_GEM_DOMAIN_CPU, 0);

	for (int i = 0; i < table.size; i++) {
		const struct mocs_entry *entry = &table.table[i];

		if (!entry->used)
			continue;

		igt_assert_f(vaddr[i] == entry->control_value,
			     "engine=%u index=%u read_value=0x%08x value=0x%08x\n",
			     engine, i, vaddr[i], entry->control_value);
	}

	munmap(vaddr, 4096);
	gem_close(fd, dst);
}
384
/* Read back the L3 cache-control (LNCFCMOCS) registers and compare against
 * the expected l3cc values.  Each 32-bit register packs two table entries:
 * the even index in the low 16 bits, the odd index in the high 16 bits,
 * hence only (size + 1) / 2 registers are read.
 */
static void check_l3cc_registers(int fd,
				 unsigned engine,
				 uint32_t ctx_id,
				 bool dirty)
{
	struct mocs_table table;
	uint32_t dst_handle = gem_create(fd, 4096);
	uint32_t *read_regs;
	int index;

	igt_assert(get_mocs_settings(fd, &table, dirty));

	do_read_registers(fd,
			  ctx_id,
			  dst_handle,
			  GEN9_LNCFCMOCS0,
			  (table.size + 1) / 2,
			  engine);

	read_regs = gem_mmap__cpu(fd, dst_handle, 0, 4096, PROT_READ);

	gem_set_domain(fd, dst_handle, I915_GEM_DOMAIN_CPU, 0);

	/* Check the fully-packed register pairs... */
	for (index = 0; index < table.size / 2; index++) {
		if (table.table[index * 2].used) {
			igt_assert_eq_u32(read_regs[index] & 0xffff,
					  table.table[index * 2].l3cc_value);
		}
		if (table.table[index * 2 + 1].used) {
			igt_assert_eq_u32(read_regs[index] >> 16,
					  table.table[index * 2 + 1].l3cc_value);
		}
	}

	/* ...then the dangling low half when the table has an odd size.
	 * NOTE(review): the tail entry's .used flag is not checked here,
	 * unlike the loop above — presumably all current odd-sized tables
	 * have their last entry in use.
	 */
	if (table.size & 1)
		igt_assert_eq_u32(read_regs[index] & 0xffff,
				  table.table[index * 2].l3cc_value);

	munmap(read_regs, 4096);
	gem_close(fd, dst_handle);
}
426
/* Idle the GPU and wait until the PMU reports RC6 residency actually
 * accumulating; skip (igt_require) if RC6 never kicks in.
 *
 * The PMU sample layout here is [0] = timestamp, [1] = RC6 residency,
 * both in nanoseconds.
 */
static void rc6_wait(int i915)
{
	uint64_t start[2], now[2], prev;
	bool rc6 = false;
	int fd;

	fd = perf_i915_open(I915_PMU_RC6_RESIDENCY);
	igt_require(fd != -1);

	/* First wait for roughly an RC6 Evaluation Interval */
	gem_quiescent_gpu(i915);
	usleep(320e3);

	/* Then poll for RC6 to start ticking */
	igt_assert_eq(read(fd, start, sizeof(start)), sizeof(start));
	prev = start[1];
	do {
		usleep(5e3);
		igt_assert_eq(read(fd, now, sizeof(now)), sizeof(now));
		/* Require >1ms of residency gained within a 5ms window
		 * before declaring RC6 active.
		 */
		if (now[1] - prev > 1e6) {
			rc6 = true;
			break;
		}
		prev = now[1];
	} while (now[0] - start[0] < 1e9); /* give up after ~1s elapsed */

	close(fd);

	igt_debug("rc6 residency %.2fms (delta %.1fms over 5ms), elapsed %.2fms\n",
		  1e-6 * (now[1] - start[1]),
		  1e-6 * (now[1] - prev),
		  1e-6 * (now[0] - start[0]));
	igt_require(rc6);
}
461
/* Verify both the engine MOCS bank and, for the render engine only, the
 * packed L3CC registers against the expected (clean or dirty) table.
 */
static void check_mocs_values(int fd,
			      unsigned engine, uint32_t ctx_id,
			      bool dirty)
{
	check_control_registers(fd, engine, ctx_id, dirty);

	/* Only render has the LNCFCMOCS (l3cc) registers to check. */
	if (engine == I915_EXEC_RENDER)
		check_l3cc_registers(fd, engine, ctx_id, dirty);
}
471
/* Scribble all-ones over the MOCS registers of @engine (and the l3cc
 * registers for render) from context @ctx_id, optionally via a SECURE
 * batch so the writes actually land.
 */
static void write_dirty_mocs(int fd,
			     unsigned engine, uint32_t ctx_id,
			     bool privileged)
{
	const int entries = intel_gen(intel_get_drm_devid(fd)) >= 11 ?
		GEN11_NUM_MOCS_ENTRIES : GEN9_NUM_MOCS_ENTRIES;

	write_registers(fd, ctx_id, get_engine_base(fd, engine),
			write_values, entries, engine, privileged);

	/* l3cc registers pack two entries each, hence half the count. */
	if (engine == I915_EXEC_RENDER)
		write_registers(fd, ctx_id, GEN9_LNCFCMOCS0,
				write_values, entries / 2,
				engine, privileged);
}
492
/* Core subtest body: check the MOCS registers hold their expected values
 * before and after the power/reset transition selected by @mode, optionally
 * on a non-default context and/or after dirtying them from another context
 * (@flags).
 */
static void run_test(int fd, unsigned engine, unsigned flags, unsigned mode)
{
	uint32_t ctx_id = 0;
	uint32_t ctx_clean_id;
	uint32_t ctx_dirty_id;

	/* As mocs is global for GEN11+, trying privileged write to dirty
	 * the mocs and testing context save and restore of mocs between
	 * contexts is bound to fail.
	 */
	if (flags & MOCS_DIRTY_VALUES)
		igt_skip_on(intel_gen(intel_get_drm_devid(fd)) >= 11);

	gem_require_ring(fd, engine);

	/* Skip if we don't know where the registers are for this engine */
	igt_require(get_engine_base(fd, engine));

	if (flags & MOCS_NON_DEFAULT_CTX)
		ctx_id = gem_context_create(fd);

	/* Dirty a second context's MOCS with privileged writes; the dirty
	 * values must then be visible from that context.
	 */
	if (flags & MOCS_DIRTY_VALUES) {
		ctx_dirty_id = gem_context_create(fd);
		write_dirty_mocs(fd, engine, ctx_dirty_id, true);
		check_mocs_values(fd, engine, ctx_dirty_id, true);
	}

	/* Baseline: default (or fresh) context still sees clean values. */
	check_mocs_values(fd, engine, ctx_id, false);

	switch (mode) {
	case NONE:	break;
	case RESET:	igt_force_gpu_reset(fd);	break;
	case SUSPEND:	igt_system_suspend_autoresume(SUSPEND_STATE_MEM,
						      SUSPEND_TEST_NONE); break;
	case HIBERNATE:	igt_system_suspend_autoresume(SUSPEND_STATE_DISK,
						      SUSPEND_TEST_NONE); break;
	case RC6:	rc6_wait(fd);	break;
	}

	/* Values must survive the transition... */
	check_mocs_values(fd, engine, ctx_id, false);

	/* ...and per-context dirty values must still be isolated: dirty ctx
	 * keeps them, a brand-new ctx gets clean ones.
	 */
	if (flags & MOCS_DIRTY_VALUES) {
		ctx_clean_id = gem_context_create(fd);
		check_mocs_values(fd, engine, ctx_dirty_id, true);
		check_mocs_values(fd, engine, ctx_clean_id, false);
		gem_context_destroy(fd, ctx_dirty_id);
		gem_context_destroy(fd, ctx_clean_id);
	}

	if (ctx_id)
		gem_context_destroy(fd, ctx_id);
}
545
isolation_test(int fd,unsigned engine)546 static void isolation_test(int fd, unsigned engine)
547 {
548 uint32_t ctx[2] = { gem_context_create(fd), gem_context_create(fd) };
549
550 /* Any writes by one normal client should not affect a second client */
551 write_dirty_mocs(fd, engine, ctx[0], false);
552 check_mocs_values(fd, engine, ctx[1], false);
553
554 for (int i = 0; i < ARRAY_SIZE(ctx); i++)
555 gem_context_destroy(fd, ctx[i]);
556 }
557
igt_main
{
	const struct intel_execution_engine *e;
	struct mocs_table table;
	int fd = -1;

	igt_fixture {
		/* Master fd needed: the test submits SECURE batches. */
		fd = drm_open_driver_master(DRIVER_INTEL); /* for SECURE */
		igt_require_gem(fd);
		gem_require_mocs_registers(fd);
		/* Bail early on platforms we have no expected table for. */
		igt_require(get_mocs_settings(fd, &table, false));
	}

	for (e = intel_execution_engines; e->name; e++) {
		/* We don't know which engine will be assigned to us if we're
		 * using plain I915_EXEC_BSD, I915_EXEC_DEFAULT is just
		 * duplicating render
		 */
		if ((e->exec_id == I915_EXEC_BSD && !e->flags) ||
		    e->exec_id == I915_EXEC_DEFAULT)
			continue;

		for (unsigned mode = NONE; mode < MAX_MOCS_TEST_MODES; mode++) {
			igt_subtest_group {
				igt_hang_t hang = {};

				/* RESET mode forces a GPU reset; tell the
				 * framework hangs are expected around it.
				 */
				igt_fixture {
					if (mode == RESET)
						hang = igt_allow_hang(fd, 0, 0);
				}

				for (unsigned flags = 0; flags < ALL_MOCS_FLAGS + 1; flags++) {
					/* Trying to test non-render engines for dirtying MOCS
					 * values from one context having effect on different
					 * context is bound to fail - only render engine is
					 * doing context save/restore of MOCS registers.
					 * Let's also limit testing values on non-default
					 * contexts to render-only.
					 */
					if (flags && e->exec_id != I915_EXEC_RENDER)
						continue;

					igt_subtest_f("mocs-%s%s%s-%s",
						      test_modes[mode],
						      flags & MOCS_NON_DEFAULT_CTX ? "-ctx": "",
						      flags & MOCS_DIRTY_VALUES ? "-dirty" : "",
						      e->name) {
						if (flags & (MOCS_NON_DEFAULT_CTX | MOCS_DIRTY_VALUES))
							gem_require_contexts(fd);

						run_test(fd, e->exec_id | e->flags, flags, mode);
					}
				}

				igt_fixture {
					if (mode == RESET)
						igt_disallow_hang(fd, hang);
				}
			}
		}

		igt_subtest_f("mocs-isolation-%s", e->name) {
			gem_require_ring(fd, e->exec_id | e->flags);
			gem_require_contexts(fd);

			isolation_test(fd, e->exec_id | e->flags);
		}
	}

	igt_fixture
		close(fd);
}
630