/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "igt.h"

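/*
 * Local copies of the EXEC_OBJECT_ASYNC flag and the
 * I915_PARAM_HAS_EXEC_ASYNC getparam from the i915 uapi, so the test
 * still builds against older kernel headers that predate them.
 */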
#define LOCAL_OBJECT_ASYNC (1 << 6)
#define LOCAL_PARAM_HAS_EXEC_ASYNC 43

IGT_TEST_DESCRIPTION("Check that we can issue concurrent writes across the engines.");

static void store_dword(int fd, unsigned ring,
			uint32_t target, uint32_t offset, uint32_t value)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t batch[16];
	int i;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	execbuf.flags = ring;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

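	/*
	 * Mark the shared target as EXEC_OBJECT_ASYNC: the kernel then skips
	 * the implicit write hazard for this object, so the store does not
	 * serialise against the other engines also writing into it.
	 */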
	memset(obj, 0, sizeof(obj));
	obj[0].handle = target;
	obj[0].flags = LOCAL_OBJECT_ASYNC;
	obj[1].handle = gem_create(fd, 4096);

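	/*
	 * The relocation patches the store's destination address inside the
	 * batch (at reloc.offset) to point at byte 'offset' of the target.
	 */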
	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = obj[0].handle;
	reloc.presumed_offset = 0;
	reloc.offset = sizeof(uint32_t);
	reloc.delta = offset;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = I915_GEM_DOMAIN_INSTRUCTION;
	obj[1].relocs_ptr = to_user_pointer(&reloc);
	obj[1].relocation_count = 1;

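	/*
	 * Assemble MI_STORE_DWORD_IMM. The encoding varies by gen: gen8+
	 * takes a 64-bit address, gen4-7 have an extra dword before the
	 * address (hence the reloc.offset bump), and gen2/3 use an encoding
	 * one dword shorter. Pre-gen6 the write goes through the global GTT
	 * (the 1 << 22 bit), which is why those batches are flagged
	 * I915_EXEC_SECURE above.
	 */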
	i = 0;
	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
	if (gen >= 8) {
		batch[++i] = offset;
		batch[++i] = 0;
	} else if (gen >= 4) {
		batch[++i] = 0;
		batch[++i] = offset;
		reloc.offset += sizeof(uint32_t);
	} else {
		batch[i]--;
		batch[++i] = offset;
	}
	batch[++i] = value;
	batch[++i] = MI_BATCH_BUFFER_END;
	gem_write(fd, obj[1].handle, 0, batch, sizeof(batch));
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj[1].handle);
}

static void one(int fd, unsigned ring, uint32_t flags)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_exec_object2 obj[2];
#define SCRATCH 0
#define BATCH 1
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_execbuffer2 execbuf;
	unsigned int other;
	uint32_t *batch;
	int i;

	/* On the target ring, create a looping batch that marks
	 * the scratch for write. Then on the other rings try to
	 * write into that target. If it blocks we hang the GPU...
	 */

	memset(obj, 0, sizeof(obj));
	obj[SCRATCH].handle = gem_create(fd, 4096);

	obj[BATCH].handle = gem_create(fd, 4096);
	obj[BATCH].relocs_ptr = to_user_pointer(&reloc);
	obj[BATCH].relocation_count = 1;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = obj[BATCH].handle; /* recurse */
	reloc.presumed_offset = 0;
	reloc.offset = sizeof(uint32_t);
	reloc.delta = 0;
	reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
	reloc.write_domain = 0;

	batch = gem_mmap__wc(fd, obj[BATCH].handle, 0, 4096, PROT_WRITE);
	gem_set_domain(fd, obj[BATCH].handle,
			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

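	/*
	 * Self-referencing MI_BATCH_BUFFER_START: the batch jumps back to
	 * its own start and spins until the MI_BATCH_BUFFER_END written
	 * below terminates it. The extra bits set the gen-specific
	 * address-space selector and dword count.
	 */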
	i = 0;
	batch[i] = MI_BATCH_BUFFER_START;
	if (gen >= 8) {
		batch[i] |= 1 << 8 | 1;
		batch[++i] = 0;
		batch[++i] = 0;
	} else if (gen >= 6) {
		batch[i] |= 1 << 8;
		batch[++i] = 0;
	} else {
		batch[i] |= 2 << 6;
		batch[++i] = 0;
		if (gen < 4) {
			batch[i] |= 1;
			reloc.delta = 1;
		}
	}
	i++;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	execbuf.flags = ring | flags;
	igt_require(__gem_execbuf(fd, &execbuf) == 0);
	gem_close(fd, obj[BATCH].handle);

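	/*
	 * With the target engine still spinning and holding the scratch,
	 * submit an async store to the scratch from every other capable
	 * engine; none of these should stall behind the spinner.
	 */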
	i = 0;
	for_each_physical_engine(fd, other) {
		if (other == ring)
			continue;

		if (!gem_can_store_dword(fd, other))
			continue;

		store_dword(fd, other, obj[SCRATCH].handle, 4*i, i);
		i++;
	}

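	/* Quench the spinner so the queued stores can run to completion. */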
	*batch = MI_BATCH_BUFFER_END;
	__sync_synchronize();
	munmap(batch, 4096);

	batch = gem_mmap__wc(fd, obj[SCRATCH].handle, 0, 4096, PROT_READ);
	/* The kernel only tracks the last *submitted* write (but all reads),
	 * so to ensure *all* rings are flushed, we flush all reads even
	 * though we only need read access for ourselves.
	 */
	gem_set_domain(fd, obj[SCRATCH].handle,
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, obj[SCRATCH].handle);
	while (i--)
		igt_assert_eq_u32(batch[i], i);
	munmap(batch, 4096);
}

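/*
 * Probe I915_PARAM_HAS_EXEC_ASYNC: on kernels without EXEC_OBJECT_ASYNC
 * support the getparam fails and 'async' stays negative.
 */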
static bool has_async_execbuf(int fd)
{
	drm_i915_getparam_t gp;
	int async = -1;

	gp.param = LOCAL_PARAM_HAS_EXEC_ASYNC;
	gp.value = &async;
	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);

	return async > 0;
}

igt_main
{
	const struct intel_execution_engine *e;
	int fd = -1;

	igt_skip_on_simulation();

	igt_fixture {
		fd = drm_open_driver_master(DRIVER_INTEL);
		igt_require_gem(fd);
		gem_require_mmap_wc(fd);
		igt_require(has_async_execbuf(fd));
		igt_require(gem_can_store_dword(fd, 0));
		igt_fork_hang_detector(fd);
	}

	for (e = intel_execution_engines; e->name; e++) {
		/* default exec-id is purely symbolic */
		if (e->exec_id == 0)
			continue;

		igt_subtest_f("concurrent-writes-%s", e->name) {
			igt_require(gem_ring_has_physical_engine(fd, e->exec_id | e->flags));
			igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
			one(fd, e->exec_id, e->flags);
		}
	}

	igt_fixture {
		igt_stop_hang_detector();
		close(fd);
	}
}