/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jeff McGee <jeff.mcgee@intel.com>
 */

#include "igt.h"
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include "i915_drm.h"
#include "intel_bufmgr.h"

IGT_TEST_DESCRIPTION("Tests slice/subslice/EU power gating functionality.\n");

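/*
 * Driver and GEM state shared by the subtests: the drm fd, the libdrm
 * buffer manager and batchbuffer, the media spin function and the target
 * buffer it writes its iteration count into.
 */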
static struct {
	int init;
	int drm_fd;
	int devid;
	int gen;
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	igt_media_spinfunc_t spinfunc;
	struct igt_buf buf;
	uint32_t spins_per_msec;
} gem;

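/* Elapsed time from start to end, in milliseconds. */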
static double
to_dt(const struct timespec *start, const struct timespec *end)
{
	double dt;

	dt = (end->tv_sec - start->tv_sec) * 1e3;
	dt += (end->tv_nsec - start->tv_nsec) * 1e-6;

	return dt;
}

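/*
 * SSEU configuration as reported by debugfs: 'info' is what the device
 * exposes as available, 'hw' is what is currently enabled.
 */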
struct status {
	struct {
		int slice_total;
		int subslice_total;
		int subslice_per;
		int eu_total;
		int eu_per;
		bool has_slice_pg;
		bool has_subslice_pg;
		bool has_eu_pg;
	} info;
	struct {
		int slice_total;
		int subslice_total;
		int subslice_per;
		int eu_total;
		int eu_per;
	} hw;
};

#define DBG_STATUS_BUF_SIZE 4096

struct {
	int init;
	int status_fd;
	char status_buf[DBG_STATUS_BUF_SIZE];
} dbg;

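/*
 * Locate the section titled 'title' in the debugfs status buffer; on return
 * *first and *last bound the section's text.
 */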
static void
dbg_get_status_section(const char *title, char **first, char **last)
{
	char *pos;

	*first = strstr(dbg.status_buf, title);
	igt_assert(*first != NULL);

	pos = *first;
	do {
		pos = strchr(pos, '\n');
		igt_assert(pos != NULL);
		pos++;
	} while (*pos == ' '); /* lines in the section begin with a space */
	*last = pos - 1;
}

static bool
dbg_has_line(const char *first, const char *last, const char *name)
{
	char *pos = strstr(first, name);

	return pos != NULL && pos < last;
}

static int
dbg_get_int(const char *first, const char *last, const char *name)
{
	char *pos;

	pos = strstr(first, name);
	igt_assert(pos != NULL);
	pos = strstr(pos, ":");
	igt_assert(pos != NULL);
	pos += 2;
	igt_assert(pos != last);

	return strtol(pos, &pos, 10);
}

static bool
dbg_get_bool(const char *first, const char *last, const char *name)
{
	char *pos;

	pos = strstr(first, name);
	igt_assert(pos != NULL);
	pos = strstr(pos, ":");
	igt_assert(pos != NULL);
	pos += 2;
	igt_assert(pos < last);

	if (*pos == 'y')
		return true;
	if (*pos == 'n')
		return false;

	igt_assert_f(false, "Could not read boolean value for %s.\n", name);
	return false;
}

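/*
 * Read i915_sseu_status from debugfs and parse both the "SSEU Device Info"
 * and "SSEU Device Status" sections into 'stat'.
 */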
static void
dbg_get_status(struct status *stat)
{
	char *first, *last;
	int nread;

	lseek(dbg.status_fd, 0, SEEK_SET);
	nread = read(dbg.status_fd, dbg.status_buf, DBG_STATUS_BUF_SIZE);
	igt_assert_lt(nread, DBG_STATUS_BUF_SIZE);
	dbg.status_buf[nread] = '\0';

	memset(stat, 0, sizeof(*stat));

	dbg_get_status_section("SSEU Device Info", &first, &last);
	for (char *tmp = first; tmp < last; tmp++)
		igt_debug("%c", *tmp);
	igt_debug("\n");
	stat->info.slice_total =
		dbg_get_int(first, last, "Available Slice Total:");
	stat->info.subslice_total =
		dbg_get_int(first, last, "Available Subslice Total:");
	/* The debugfs field name changed in kernel 4.17. */
	if (dbg_has_line(first, last, "Available Subslice Per Slice:")) {
		stat->info.subslice_per =
			dbg_get_int(first, last, "Available Subslice Per Slice:");
	} else {
		stat->info.subslice_per =
			dbg_get_int(first, last, "Available Slice0 subslices:");
	}
	stat->info.eu_total =
		dbg_get_int(first, last, "Available EU Total:");
	stat->info.eu_per =
		dbg_get_int(first, last, "Available EU Per Subslice:");
	stat->info.has_slice_pg =
		dbg_get_bool(first, last, "Has Slice Power Gating:");
	stat->info.has_subslice_pg =
		dbg_get_bool(first, last, "Has Subslice Power Gating:");
	stat->info.has_eu_pg =
		dbg_get_bool(first, last, "Has EU Power Gating:");

	dbg_get_status_section("SSEU Device Status", &first, &last);
	for (char *tmp = first; tmp < last; tmp++)
		igt_debug("%c", *tmp);
	igt_debug("\n");
	stat->hw.slice_total =
		dbg_get_int(first, last, "Enabled Slice Total:");
	stat->hw.subslice_total =
		dbg_get_int(first, last, "Enabled Subslice Total:");
	/* The debugfs field name changed in kernel 4.17. */
	if (dbg_has_line(first, last, "Enabled Subslice Per Slice:")) {
		stat->hw.subslice_per =
			dbg_get_int(first, last, "Enabled Subslice Per Slice:");
	} else if (dbg_has_line(first, last, "Enabled Slice0 subslices:")) {
		stat->hw.subslice_per =
			dbg_get_int(first, last, "Enabled Slice0 subslices:");
	}
	stat->hw.eu_total =
		dbg_get_int(first, last, "Enabled EU Total:");
	stat->hw.eu_per =
		dbg_get_int(first, last, "Enabled EU Per Subslice:");
}

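/* Open the i915_sseu_status debugfs entry; skip the test if it is absent. */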
static void
dbg_init(void)
{
	igt_assert(gem.init);
	dbg.status_fd = igt_debugfs_open(gem.drm_fd, "i915_sseu_status", O_RDONLY);
	igt_skip_on_f(dbg.status_fd == -1,
		      "debugfs entry 'i915_sseu_status' not found\n");
	dbg.init = 1;
}

static void
dbg_deinit(void)
{
	switch (dbg.init)
	{
	case 1:
		close(dbg.status_fd);
	}
}

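/* Verify that the spin batch wrote back the expected iteration count. */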
static void
gem_check_spin(uint32_t spins)
{
	uint32_t *data;

	data = (uint32_t*)gem.buf.bo->virtual;
	igt_assert_eq_u32(*data, spins);
}

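/*
 * Calibration: find the spin count that keeps the media spinner busy for
 * roughly 'dt' milliseconds.
 */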
static uint32_t
gem_get_target_spins(double dt)
{
	struct timespec tstart, tdone;
	double prev_dt, cur_dt;
	uint32_t spins;
	int i, ret;

	/* Double the spin count until we bound the target time */
	prev_dt = 0.0;
	for (i = 0; i < 32; i++) {
		spins = 1 << i;
		clock_gettime(CLOCK_MONOTONIC, &tstart);

		gem.spinfunc(gem.batch, &gem.buf, spins);
		ret = drm_intel_bo_map(gem.buf.bo, 0);
		igt_assert_eq(ret, 0);
		clock_gettime(CLOCK_MONOTONIC, &tdone);

		gem_check_spin(spins);
		drm_intel_bo_unmap(gem.buf.bo);

		cur_dt = to_dt(&tstart, &tdone);
		if (cur_dt > dt)
			break;
		prev_dt = cur_dt;
	}
	igt_assert_neq(i, 32);

	/* Linearly interpolate between i and i-1 to get the target count */
	spins = 1 << (i-1); /* lower bound spins */
	spins += spins * (dt - prev_dt)/(cur_dt - prev_dt); /* target spins */

	return spins;
}

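/* Open the driver, set up the libdrm objects and calibrate the spinner. */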
static void
gem_init(void)
{
	gem.drm_fd = drm_open_driver(DRIVER_INTEL);
	igt_require_gem(gem.drm_fd);
	gem.init = 1;

	gem.devid = intel_get_drm_devid(gem.drm_fd);
	gem.gen = intel_gen(gem.devid);
	igt_require_f(gem.gen >= 8,
		      "SSEU power gating only relevant for Gen8+");

	gem.spinfunc = igt_get_media_spinfunc(gem.devid);
	igt_require(gem.spinfunc);

	gem.bufmgr = drm_intel_bufmgr_gem_init(gem.drm_fd, 4096);
	igt_assert(gem.bufmgr);
	gem.init = 2;

	drm_intel_bufmgr_gem_enable_reuse(gem.bufmgr);

	gem.batch = intel_batchbuffer_alloc(gem.bufmgr, gem.devid);
	igt_assert(gem.batch);
	gem.init = 3;

	gem.buf.stride = sizeof(uint32_t);
	gem.buf.tiling = I915_TILING_NONE;
	gem.buf.size = gem.buf.stride;
	gem.buf.bo = drm_intel_bo_alloc(gem.bufmgr, "", gem.buf.size, 4096);
	gem.buf.bpp = 32;
	igt_assert(gem.buf.bo);
	gem.init = 4;

	gem.spins_per_msec = gem_get_target_spins(100) / 100;
}

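/* Tear down in reverse order of gem_init(); cases intentionally fall through. */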
static void
gem_deinit(void)
{
	switch (gem.init)
	{
	case 4:
		drm_intel_bo_unmap(gem.buf.bo);
		drm_intel_bo_unreference(gem.buf.bo);
	case 3:
		intel_batchbuffer_free(gem.batch);
	case 2:
		drm_intel_bufmgr_destroy(gem.bufmgr);
	case 1:
		close(gem.drm_fd);
	}
}

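/* Compare the enabled SSEU configuration against everything that is available. */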
static void
check_full_enable(struct status *stat)
{
	igt_assert_eq(stat->hw.slice_total, stat->info.slice_total);
	igt_assert_eq(stat->hw.subslice_total, stat->info.subslice_total);
	igt_assert_eq(stat->hw.subslice_per, stat->info.subslice_per);

	/*
	 * EU are powered in pairs, but it is possible for one EU in the pair
	 * to be non-functional due to fusing. The determination of enabled
	 * EU does not account for this and can therefore actually exceed the
	 * available count. Allow for this small discrepancy in our
	 * comparison.
	 */
	igt_assert_lte(stat->info.eu_total, stat->hw.eu_total);
	igt_assert_lte(stat->info.eu_per, stat->hw.eu_per);
}

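/*
 * Submit a media spin batch and, while it is running, verify that the full
 * SSEU configuration is enabled.
 */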
static void
full_enable(void)
{
	struct status stat;
	const int spin_msec = 10;
	int ret, spins;

	/* Simulation doesn't currently model slice/subslice/EU power gating. */
	igt_skip_on_simulation();

	/*
	 * Gen9 SKL is the first case in which render power gating can leave
	 * slice/subslice/EU in a partially enabled state upon resumption of
	 * render work. So start checking that this is prevented as of Gen9.
	 */
	igt_require(gem.gen >= 9);

	spins = spin_msec * gem.spins_per_msec;

	gem.spinfunc(gem.batch, &gem.buf, spins);

	usleep(2000); /* 2ms wait to make sure batch is running */
	dbg_get_status(&stat);

	ret = drm_intel_bo_map(gem.buf.bo, 0);
	igt_assert_eq(ret, 0);

	gem_check_spin(spins);
	drm_intel_bo_unmap(gem.buf.bo);

	check_full_enable(&stat);
}

static void
exit_handler(int sig)
{
	dbg_deinit();
	gem_deinit();
}

igt_main
{
	igt_fixture {
		igt_install_exit_handler(exit_handler);

		gem_init();
		dbg_init();
	}

	igt_subtest("full-enable")
		full_enable();
}
400