/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "igt.h"
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#include <drm.h>

#ifndef I915_PARAM_CMD_PARSER_VERSION
#define I915_PARAM_CMD_PARSER_VERSION 28
#endif

#define DERRMR 0x44050
#define OASTATUS2 0x2368
#define OACONTROL 0x2360
#define SO_WRITE_OFFSET_0 0x5280

#define HSW_CS_GPR(n) (0x2600 + 8*(n))
#define HSW_CS_GPR0 HSW_CS_GPR(0)
#define HSW_CS_GPR1 HSW_CS_GPR(1)

/* To help craft commands known to be invalid across all engines */
#define INSTR_CLIENT_SHIFT 29
#define INSTR_INVALID_CLIENT 0x7

#define MI_LOAD_REGISTER_REG (0x2a << 23)
#define MI_STORE_REGISTER_MEM (0x24 << 23)
#define MI_ARB_ON_OFF (0x8 << 23)
#define MI_DISPLAY_FLIP ((0x14 << 23) | 1)

#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_LRI_POST_OP (1<<23)

static int parser_version;

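/*
 * Query the kernel's batch buffer command parser version via GETPARAM;
 * returns -1 if the parameter is not supported.
 */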
static int command_parser_version(int fd)
{
        int version = -1;
        drm_i915_getparam_t gp;

        gp.param = I915_PARAM_CMD_PARSER_VERSION;
        gp.value = &version;

        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
                return version;

        return -1;
}

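/*
 * Write @cmds into @cmd_bo, add a relocation at @patch_offset pointing at a
 * freshly created target buffer, execute the batch on the render ring and
 * return the 64-bit value the batch wrote to the start of the target buffer.
 */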
static uint64_t __exec_batch_patched(int fd, uint32_t cmd_bo, uint32_t *cmds,
                                     int size, int patch_offset)
{
        struct drm_i915_gem_execbuffer2 execbuf;
        struct drm_i915_gem_exec_object2 obj[2];
        struct drm_i915_gem_relocation_entry reloc[1];

        uint32_t target_bo = gem_create(fd, 4096);
        uint64_t actual_value = 0;

        gem_write(fd, cmd_bo, 0, cmds, size);

        memset(obj, 0, sizeof(obj));
        obj[0].handle = target_bo;
        obj[1].handle = cmd_bo;

        memset(reloc, 0, sizeof(reloc));
        reloc[0].offset = patch_offset;
        reloc[0].target_handle = obj[0].handle;
        reloc[0].delta = 0;
        reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
        reloc[0].write_domain = I915_GEM_DOMAIN_COMMAND;
        obj[1].relocs_ptr = to_user_pointer(reloc);
        obj[1].relocation_count = 1;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = to_user_pointer(obj);
        execbuf.buffer_count = 2;
        execbuf.batch_len = size;
        execbuf.flags = I915_EXEC_RENDER;

        gem_execbuf(fd, &execbuf);
        gem_sync(fd, cmd_bo);

        gem_read(fd, target_bo, 0, &actual_value, sizeof(actual_value));

        gem_close(fd, target_bo);

        return actual_value;
}

static void exec_batch_patched(int fd, uint32_t cmd_bo, uint32_t *cmds,
                               int size, int patch_offset,
                               uint64_t expected_value)
{
        igt_assert_eq(__exec_batch_patched(fd, cmd_bo, cmds,
                                           size, patch_offset),
                      expected_value);
}

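/*
 * Write @cmds into @cmd_bo and submit it as-is on the requested ring,
 * returning the raw execbuffer result (0 on success, negative errno on
 * rejection).
 */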
static int __exec_batch(int fd, uint32_t cmd_bo, uint32_t *cmds,
                        int size, int ring)
{
        struct drm_i915_gem_execbuffer2 execbuf;
        struct drm_i915_gem_exec_object2 obj[1];

        gem_write(fd, cmd_bo, 0, cmds, size);

        memset(obj, 0, sizeof(obj));
        obj[0].handle = cmd_bo;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = to_user_pointer(obj);
        execbuf.buffer_count = 1;
        execbuf.batch_len = size;
        execbuf.flags = ring;

        return __gem_execbuf(fd, &execbuf);
}
#define exec_batch(fd, bo, cmds, sz, ring, expected) \
        igt_assert_eq(__exec_batch(fd, bo, cmds, sz, ring), expected)

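/*
 * Submit a batch whose first command starts in the last dword of the first
 * page of a two-page buffer, so the command straddles a page boundary, and
 * check the execbuffer result against @expected_ret.
 */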
static void exec_split_batch(int fd, uint32_t *cmds,
                             int size, int ring, int expected_ret)
{
        struct drm_i915_gem_execbuffer2 execbuf;
        struct drm_i915_gem_exec_object2 obj[1];
        uint32_t cmd_bo;
        uint32_t noop[1024] = { 0 };
        const int alloc_size = 4096 * 2;
        const int actual_start_offset = 4096 - sizeof(uint32_t);

        /* Allocate and fill a 2-page batch with noops */
        cmd_bo = gem_create(fd, alloc_size);
        gem_write(fd, cmd_bo, 0, noop, sizeof(noop));
        gem_write(fd, cmd_bo, 4096, noop, sizeof(noop));

        /* Write the provided commands such that the first dword
         * of the command buffer is the last dword of the first
         * page (i.e. the command is split across the two pages).
         */
        gem_write(fd, cmd_bo, actual_start_offset, cmds, size);

        memset(obj, 0, sizeof(obj));
        obj[0].handle = cmd_bo;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = to_user_pointer(obj);
        execbuf.buffer_count = 1;
        /* NB: We want batch_start_offset and batch_len to point to the block
         * of the actual commands (i.e. at the last dword of the first page),
         * but have to adjust both the start offset and length to meet the
         * kernel driver's requirements on the alignment of those fields.
         */
        execbuf.batch_start_offset = actual_start_offset & ~0x7;
        execbuf.batch_len =
                ALIGN(size + actual_start_offset - execbuf.batch_start_offset,
                      0x8);
        execbuf.flags = ring;

        igt_assert_eq(__gem_execbuf(fd, &execbuf), expected_ret);

        gem_sync(fd, cmd_bo);
        gem_close(fd, cmd_bo);
}

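/*
 * Execute @cmds as a second-level batch invoked via MI_BATCH_BUFFER_START
 * from a small first-level batch, then compare the value written to a
 * relocated target buffer against @expected_value.
 */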
static void exec_batch_chained(int fd, uint32_t cmd_bo, uint32_t *cmds,
                               int size, int patch_offset,
                               uint64_t expected_value)
{
        struct drm_i915_gem_execbuffer2 execbuf;
        struct drm_i915_gem_exec_object2 obj[3];
        struct drm_i915_gem_relocation_entry reloc[1];
        struct drm_i915_gem_relocation_entry first_level_reloc;

        uint32_t target_bo = gem_create(fd, 4096);
        uint32_t first_level_bo = gem_create(fd, 4096);
        uint64_t actual_value = 0;

        static uint32_t first_level_cmds[] = {
                MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965,
                0,
                MI_BATCH_BUFFER_END,
                0,
        };

        if (IS_HASWELL(intel_get_drm_devid(fd)))
                first_level_cmds[0] |= MI_BATCH_NON_SECURE_HSW;

        gem_write(fd, first_level_bo, 0,
                  first_level_cmds, sizeof(first_level_cmds));
        gem_write(fd, cmd_bo, 0, cmds, size);

        memset(obj, 0, sizeof(obj));
        obj[0].handle = target_bo;
        obj[1].handle = cmd_bo;
        obj[2].handle = first_level_bo;

        memset(reloc, 0, sizeof(reloc));
        reloc[0].offset = patch_offset;
        reloc[0].delta = 0;
        reloc[0].target_handle = target_bo;
        reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
        reloc[0].write_domain = I915_GEM_DOMAIN_COMMAND;
        obj[1].relocation_count = 1;
        obj[1].relocs_ptr = to_user_pointer(&reloc);

        memset(&first_level_reloc, 0, sizeof(first_level_reloc));
        first_level_reloc.offset = 4;
        first_level_reloc.delta = 0;
        first_level_reloc.target_handle = cmd_bo;
        first_level_reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
        first_level_reloc.write_domain = 0;
        obj[2].relocation_count = 1;
        obj[2].relocs_ptr = to_user_pointer(&first_level_reloc);

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = to_user_pointer(obj);
        execbuf.buffer_count = 3;
        execbuf.batch_len = sizeof(first_level_cmds);
        execbuf.flags = I915_EXEC_RENDER;

        gem_execbuf(fd, &execbuf);
        gem_sync(fd, cmd_bo);

        gem_read(fd, target_bo, 0, &actual_value, sizeof(actual_value));
        igt_assert_eq(expected_value, actual_value);

        gem_close(fd, first_level_bo);
        gem_close(fd, target_bo);
}

/* Be careful to take into account what register bits we can store and read
 * from...
 */
struct test_lri {
        const char *name; /* register name for debug info */
        uint32_t reg; /* address to test */
        uint32_t read_mask; /* ignore things like HW status bits */
        uint32_t init_val; /* initial identifiable value to set without LRI */
        uint32_t test_val; /* value to attempt loading via LRI command */
        bool whitelisted; /* expect to become NOOP / fail if not whitelisted */
        int min_ver; /* required command parser version to test */
};

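/*
 * Set the register to init_val via MMIO, submit an LRI of test_val on the
 * render ring and verify that the register changed for whitelisted registers
 * or stayed at init_val (LRI rejected or NOOPed) otherwise.
 */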
static void
test_lri(int fd, uint32_t handle, struct test_lri *test)
{
        uint32_t lri[] = {
                MI_LOAD_REGISTER_IMM,
                test->reg,
                test->test_val,
                MI_BATCH_BUFFER_END,
        };
        int bad_lri_errno = parser_version >= 8 ? 0 : -EINVAL;
        int expected_errno = test->whitelisted ? 0 : bad_lri_errno;
        uint32_t expect = test->whitelisted ? test->test_val : test->init_val;

        igt_debug("Testing %s LRI: addr=%x, val=%x, expected errno=%d, expected val=%x\n",
                  test->name, test->reg, test->test_val,
                  expected_errno, expect);

        intel_register_write(test->reg, test->init_val);

        igt_assert_eq_u32((intel_register_read(test->reg) &
                           test->read_mask),
                          test->init_val);

        exec_batch(fd, handle,
                   lri, sizeof(lri),
                   I915_EXEC_RENDER,
                   expected_errno);
        gem_sync(fd, handle);

        igt_assert_eq_u32((intel_register_read(test->reg) &
                           test->read_mask),
                          expect);
}

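/*
 * Exercise the parser's handling of batch offsets and lengths by submitting
 * batches of widely varying sizes and randomised batch_start_offset values
 * for 20 seconds.
 */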
static void test_allocations(int fd)
{
        const uint32_t bbe = MI_BATCH_BUFFER_END;
        struct drm_i915_gem_execbuffer2 execbuf;
        struct drm_i915_gem_exec_object2 obj[17];
        unsigned long count;

        intel_require_memory(2, 1ull << (12 + ARRAY_SIZE(obj)), CHECK_RAM);

        memset(obj, 0, sizeof(obj));
        for (int i = 0; i < ARRAY_SIZE(obj); i++) {
                uint64_t size = 1ull << (12 + i);

                obj[i].handle = gem_create(fd, size);
                for (uint64_t page = 4096; page <= size; page += 4096)
                        gem_write(fd, obj[i].handle,
                                  page - sizeof(bbe), &bbe, sizeof(bbe));
        }

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffer_count = 1;

        count = 0;
        igt_until_timeout(20) {
                int i = rand() % ARRAY_SIZE(obj);
                execbuf.buffers_ptr = to_user_pointer(&obj[i]);
                execbuf.batch_start_offset = (rand() % (1ull << i)) << 12;
                execbuf.batch_start_offset += 64 * (rand() % 64);
                execbuf.batch_len = (1ull << (12 + i)) - execbuf.batch_start_offset;
                gem_execbuf(fd, &execbuf);
                count++;
        }
        igt_info("Submitted %lu execbufs\n", count);
        igt_drop_caches_set(fd, DROP_RESET_ACTIVE); /* Cancel the queued work */

        for (int i = 0; i < ARRAY_SIZE(obj); i++) {
                gem_sync(fd, obj[i].handle);
                gem_close(fd, obj[i].handle);
        }
}

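/*
 * Check MI_LOAD_REGISTER_REG handling on Haswell: loading GPR0 from an
 * allowed source register must change GPR0 away from its initial value,
 * while loads from disallowed sources must be rejected (or NOOPed on newer
 * parsers), leaving GPR0 untouched.
 */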
static void hsw_load_register_reg(void)
{
        uint32_t init_gpr0[16] = {
                MI_LOAD_REGISTER_IMM | (3 - 2),
                HSW_CS_GPR0,
                0xabcdabc0, /* leave [1:0] zero */
                MI_BATCH_BUFFER_END,
        };
        uint32_t store_gpr0[16] = {
                MI_STORE_REGISTER_MEM | (3 - 2),
                HSW_CS_GPR0,
                0, /* reloc */
                MI_BATCH_BUFFER_END,
        };
        uint32_t do_lrr[16] = {
                MI_LOAD_REGISTER_REG | (3 - 2),
                0, /* [1] = src */
                HSW_CS_GPR0, /* dst */
                MI_BATCH_BUFFER_END,
        };
        uint32_t allowed_regs[] = {
                HSW_CS_GPR1,
                SO_WRITE_OFFSET_0,
        };
        uint32_t disallowed_regs[] = {
                0,
                OACONTROL, /* filtered */
                DERRMR, /* master only */
                0x2038, /* RING_START: invalid */
        };
        int fd;
        uint32_t handle;
        int bad_lrr_errno = parser_version >= 8 ? 0 : -EINVAL;

        /* Open again to get a non-master file descriptor */
        fd = drm_open_driver(DRIVER_INTEL);

        igt_require(IS_HASWELL(intel_get_drm_devid(fd)));
        igt_require(parser_version >= 7);

        handle = gem_create(fd, 4096);

        for (int i = 0; i < ARRAY_SIZE(allowed_regs); i++) {
                uint32_t var;

                exec_batch(fd, handle, init_gpr0, sizeof(init_gpr0),
                           I915_EXEC_RENDER,
                           0);
                exec_batch_patched(fd, handle,
                                   store_gpr0, sizeof(store_gpr0),
                                   2 * sizeof(uint32_t), /* reloc */
                                   0xabcdabc0);
                do_lrr[1] = allowed_regs[i];
                exec_batch(fd, handle, do_lrr, sizeof(do_lrr),
                           I915_EXEC_RENDER,
                           0);
                var = __exec_batch_patched(fd, handle,
                                           store_gpr0, sizeof(store_gpr0),
                                           2 * sizeof(uint32_t)); /* reloc */
                igt_assert_neq(var, 0xabcdabc0);
        }

        for (int i = 0; i < ARRAY_SIZE(disallowed_regs); i++) {
                exec_batch(fd, handle, init_gpr0, sizeof(init_gpr0),
                           I915_EXEC_RENDER,
                           0);
                exec_batch_patched(fd, handle,
                                   store_gpr0, sizeof(store_gpr0),
                                   2 * sizeof(uint32_t), /* reloc */
                                   0xabcdabc0);
                do_lrr[1] = disallowed_regs[i];
                exec_batch(fd, handle, do_lrr, sizeof(do_lrr),
                           I915_EXEC_RENDER,
                           bad_lrr_errno);
                exec_batch_patched(fd, handle,
                                   store_gpr0, sizeof(store_gpr0),
                                   2 * sizeof(uint32_t), /* reloc */
                                   0xabcdabc0);
        }

        close(fd);
}

igt_main
{
        uint32_t handle;
        int fd;

        igt_fixture {
                fd = drm_open_driver(DRIVER_INTEL);
                igt_require_gem(fd);

                parser_version = command_parser_version(fd);
                igt_require(parser_version != -1);

                igt_require(gem_uses_ppgtt(fd));

                handle = gem_create(fd, 4096);

                /* ATM cmd parser only exists on gen7. */
                igt_require(intel_gen(intel_get_drm_devid(fd)) == 7);
                igt_fork_hang_detector(fd);
        }

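        /* A plain PIPE_CONTROL post-sync QW write to a relocated target is
         * an allowed command and must land the expected value in the target
         * buffer.
         */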
449 igt_subtest("basic-allowed") {
450 uint32_t pc[] = {
451 GFX_OP_PIPE_CONTROL,
452 PIPE_CONTROL_QW_WRITE,
453 0, /* To be patched */
454 0x12000000,
455 0,
456 MI_BATCH_BUFFER_END,
457 };
458 exec_batch_patched(fd, handle,
459 pc, sizeof(pc),
460 8, /* patch offset, */
461 0x12000000);
462 }
463
464 igt_subtest("basic-rejected") {
465 uint32_t invalid_cmd[] = {
466 INSTR_INVALID_CLIENT << INSTR_CLIENT_SHIFT,
467 MI_BATCH_BUFFER_END,
468 };
469 uint32_t invalid_set_context[] = {
470 MI_SET_CONTEXT | 32, /* invalid length */
471 MI_BATCH_BUFFER_END,
472 };
473 exec_batch(fd, handle,
474 invalid_cmd, sizeof(invalid_cmd),
475 I915_EXEC_RENDER,
476 -EINVAL);
477 exec_batch(fd, handle,
478 invalid_cmd, sizeof(invalid_cmd),
479 I915_EXEC_BSD,
480 -EINVAL);
481 if (gem_has_blt(fd)) {
482 exec_batch(fd, handle,
483 invalid_cmd, sizeof(invalid_cmd),
484 I915_EXEC_BLT,
485 -EINVAL);
486 }
487 if (gem_has_vebox(fd)) {
488 exec_batch(fd, handle,
489 invalid_cmd, sizeof(invalid_cmd),
490 I915_EXEC_VEBOX,
491 -EINVAL);
492 }
493
494 exec_batch(fd, handle,
495 invalid_set_context, sizeof(invalid_set_context),
496 I915_EXEC_RENDER,
497 -EINVAL);
498 }
499
500 igt_subtest("basic-allocation") {
501 test_allocations(fd);
502 }
503
        igt_subtest_group {
#define REG(R, MSK, INI, V, OK, MIN_V) { #R, R, MSK, INI, V, OK, MIN_V }
                struct test_lri lris[] = {
                        /* dummy head pointer */
                        REG(OASTATUS2,
                            0xffffff80, 0xdeadf000, 0xbeeff000, false, 0),
                        /* NB: [1:0] MBZ */
                        REG(SO_WRITE_OFFSET_0,
                            0xfffffffc, 0xabcdabc0, 0xbeefbee0, true, 0),

                        /* It's really important for us to check that
                         * an LRI to OACONTROL doesn't result in an
                         * EINVAL error because Mesa attempts writing
                         * to OACONTROL to determine what extensions to
                         * expose and will abort() for execbuffer()
                         * errors.
                         *
                         * Mesa can gracefully recognise and handle the
                         * LRI becoming a NOOP.
                         *
                         * The test values represent dummy context IDs
                         * while leaving the OA unit disabled
                         */
                        REG(OACONTROL,
                            0xfffff000, 0xfeed0000, 0x31337000, false, 9)
                };
#undef REG

                igt_fixture {
                        intel_register_access_init(intel_get_pci_device(), 0, fd);
                }

                for (int i = 0; i < ARRAY_SIZE(lris); i++) {
                        igt_subtest_f("test-lri-%s", lris[i].name) {
                                igt_require_f(parser_version >= lris[i].min_ver,
                                              "minimum required parser version for test = %d\n",
                                              lris[i].min_ver);
                                test_lri(fd, handle, lris + i);
                        }
                }

                igt_fixture {
                        intel_register_access_fini();
                }
        }

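        /* PIPE_CONTROL_LRI_POST_OP is a rejected bit: parser v8+ is expected
         * to squash the command to a NOOP (so nothing is written), while
         * older parsers reject the whole batch with -EINVAL.
         */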
550 igt_subtest("bitmasks") {
551 uint32_t pc[] = {
552 GFX_OP_PIPE_CONTROL,
553 (PIPE_CONTROL_QW_WRITE |
554 PIPE_CONTROL_LRI_POST_OP),
555 0, /* To be patched */
556 0x12000000,
557 0,
558 MI_BATCH_BUFFER_END,
559 };
560 if (parser_version >= 8) {
561 /* Expect to read back zero since the command should be
562 * squashed to a NOOP
563 */
564 exec_batch_patched(fd, handle,
565 pc, sizeof(pc),
566 8, /* patch offset, */
567 0x0);
568 } else {
569 exec_batch(fd, handle,
570 pc, sizeof(pc),
571 I915_EXEC_RENDER,
572 -EINVAL);
573 }
574 }
575
576 igt_subtest("batch-without-end") {
577 uint32_t noop[1024] = { 0 };
578 exec_batch(fd, handle,
579 noop, sizeof(noop),
580 I915_EXEC_RENDER,
581 -EINVAL);
582 }
583
584 igt_subtest("cmd-crossing-page") {
585 uint32_t lri_ok[] = {
586 MI_LOAD_REGISTER_IMM,
587 SO_WRITE_OFFSET_0, /* allowed register address */
588 0xdcbaabc0, /* [1:0] MBZ */
589 MI_BATCH_BUFFER_END,
590 };
591 uint32_t store_reg[] = {
592 MI_STORE_REGISTER_MEM | (3 - 2),
593 SO_WRITE_OFFSET_0,
594 0, /* reloc */
595 MI_BATCH_BUFFER_END,
596 };
597 exec_split_batch(fd,
598 lri_ok, sizeof(lri_ok),
599 I915_EXEC_RENDER,
600 0);
601 exec_batch_patched(fd, handle,
602 store_reg,
603 sizeof(store_reg),
604 2 * sizeof(uint32_t), /* reloc */
605 0xdcbaabc0);
606 }
607
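        /* Parsers before v9 track OACONTROL writes within a batch: a batch
         * may enable OACONTROL only if it disables it again before
         * MI_BATCH_BUFFER_END; batches that leave it enabled are rejected.
         */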
608 igt_subtest("oacontrol-tracking") {
609 uint32_t lri_ok[] = {
610 MI_LOAD_REGISTER_IMM,
611 OACONTROL,
612 0x31337000,
613 MI_LOAD_REGISTER_IMM,
614 OACONTROL,
615 0x0,
616 MI_BATCH_BUFFER_END,
617 0
618 };
619 uint32_t lri_bad[] = {
620 MI_LOAD_REGISTER_IMM,
621 OACONTROL,
622 0x31337000,
623 MI_BATCH_BUFFER_END,
624 };
625 uint32_t lri_extra_bad[] = {
626 MI_LOAD_REGISTER_IMM,
627 OACONTROL,
628 0x31337000,
629 MI_LOAD_REGISTER_IMM,
630 OACONTROL,
631 0x0,
632 MI_LOAD_REGISTER_IMM,
633 OACONTROL,
634 0x31337000,
635 MI_BATCH_BUFFER_END,
636 };
637
638 igt_require(parser_version < 9);
639
640 exec_batch(fd, handle,
641 lri_ok, sizeof(lri_ok),
642 I915_EXEC_RENDER,
643 0);
644 exec_batch(fd, handle,
645 lri_bad, sizeof(lri_bad),
646 I915_EXEC_RENDER,
647 -EINVAL);
648 exec_batch(fd, handle,
649 lri_extra_bad, sizeof(lri_extra_bad),
650 I915_EXEC_RENDER,
651 -EINVAL);
652 }
653
654 igt_subtest("chained-batch") {
655 uint32_t pc[] = {
656 GFX_OP_PIPE_CONTROL,
657 PIPE_CONTROL_QW_WRITE,
658 0, /* To be patched */
659 0x12000000,
660 0,
661 MI_BATCH_BUFFER_END,
662 };
663 exec_batch_chained(fd, handle,
664 pc, sizeof(pc),
665 8, /* patch offset, */
666 0x12000000);
667 }
668
669 igt_subtest("load-register-reg")
670 hsw_load_register_reg();
671
672 igt_fixture {
673 igt_stop_hang_detector();
674 gem_close(fd, handle);
675
676 close(fd);
677 }
678 }
679