• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2011 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Daniel Vetter <daniel.vetter@ffwll.ch>
25  *
26  */
27 
28 #include "igt.h"
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <string.h>
32 #include <fcntl.h>
33 #include <inttypes.h>
34 #include <errno.h>
35 #include <sys/stat.h>
36 #include <sys/time.h>
37 
38 #include <drm.h>
39 
40 
41 IGT_TEST_DESCRIPTION("Test pwrite/pread consistency when touching partial"
42 		     " cachelines.");
43 
44 /*
45  * Testcase: pwrite/pread consistency when touching partial cachelines
46  *
47  * Some fancy new pwrite/pread optimizations clflush in-line while
48  * reading/writing. Check whether all required clflushes happen.
49  *
50  * Unfortunately really old mesa used unaligned pread/pwrite for s/w fallback
51  * rendering, so we need to check whether this works on tiled buffers, too.
52  *
53  */
54 
/* Shared test state, initialized once in the igt_fixture in igt_main. */
static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch;

drm_intel_bo *scratch_bo;	/* X-tiled target bo that all subtests pread/pwrite */
drm_intel_bo *staging_bo;	/* linear bo used to blit fill patterns into scratch_bo */
drm_intel_bo *tiled_staging_bo;	/* tiled bo used as blit destination for readback */
unsigned long scratch_pitch;	/* pitch of scratch_bo; asserted == 4096 in the fixture */
#define BO_SIZE (32*4096)
uint32_t devid;
int fd;
65 
66 static void
copy_bo(drm_intel_bo * src,int src_tiled,drm_intel_bo * dst,int dst_tiled)67 copy_bo(drm_intel_bo *src, int src_tiled,
68 	drm_intel_bo *dst, int dst_tiled)
69 {
70 	unsigned long dst_pitch = scratch_pitch;
71 	unsigned long src_pitch = scratch_pitch;
72 	uint32_t cmd_bits = 0;
73 
74 	/* dst is tiled ... */
75 	if (intel_gen(devid) >= 4 && dst_tiled) {
76 		dst_pitch /= 4;
77 		cmd_bits |= XY_SRC_COPY_BLT_DST_TILED;
78 	}
79 
80 	if (intel_gen(devid) >= 4 && dst_tiled) {
81 		src_pitch /= 4;
82 		cmd_bits |= XY_SRC_COPY_BLT_SRC_TILED;
83 	}
84 
85 	BLIT_COPY_BATCH_START(cmd_bits);
86 	OUT_BATCH((3 << 24) | /* 32 bits */
87 		  (0xcc << 16) | /* copy ROP */
88 		  dst_pitch);
89 	OUT_BATCH(0 << 16 | 0);
90 	OUT_BATCH(BO_SIZE/scratch_pitch << 16 | 1024);
91 	OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
92 	OUT_BATCH(0 << 16 | 0);
93 	OUT_BATCH(src_pitch);
94 	OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
95 	ADVANCE_BATCH();
96 
97 	intel_batchbuffer_flush(batch);
98 }
99 
100 static void
blt_bo_fill(drm_intel_bo * tmp_bo,drm_intel_bo * bo,int val)101 blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, int val)
102 {
103 	uint8_t *gtt_ptr;
104 	int i;
105 
106 	drm_intel_gem_bo_map_gtt(tmp_bo);
107 	gtt_ptr = tmp_bo->virtual;
108 
109 	for (i = 0; i < BO_SIZE; i++)
110 		gtt_ptr[i] = val;
111 
112 	drm_intel_gem_bo_unmap_gtt(tmp_bo);
113 
114 	igt_drop_caches_set(fd, DROP_BOUND);
115 
116 	copy_bo(tmp_bo, 0, bo, 1);
117 }
118 
#define MAX_BLT_SIZE 128	/* NOTE(review): not referenced in this file — confirm before removing */
#define ROUNDS 200		/* iterations per subtest */
/* CPU-side scratch buffers: pwrite source pattern and pread readback. */
uint8_t tmp[BO_SIZE];
uint8_t compare_tmp[BO_SIZE];
123 
test_partial_reads(void)124 static void test_partial_reads(void)
125 {
126 	int i, j;
127 
128 	for (i = 0; i < ROUNDS; i++) {
129 		int start, len;
130 		int val = i % 256;
131 
132 		blt_bo_fill(staging_bo, scratch_bo, i);
133 
134 		start = random() % BO_SIZE;
135 		len = random() % (BO_SIZE-start) + 1;
136 
137 		drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
138 		for (j = 0; j < len; j++) {
139 			igt_assert_f(tmp[j] == val,
140 				     "mismatch at %i, got: %i, expected: %i\n",
141 				     start + j, tmp[j], val);
142 		}
143 
144 		igt_progress("partial reads test: ", i, ROUNDS);
145 	}
146 }
147 
test_partial_writes(void)148 static void test_partial_writes(void)
149 {
150 	int i, j;
151 
152 	for (i = 0; i < ROUNDS; i++) {
153 		int start, len;
154 		int val = i % 256;
155 
156 		blt_bo_fill(staging_bo, scratch_bo, i);
157 
158 		start = random() % BO_SIZE;
159 		len = random() % (BO_SIZE-start) + 1;
160 
161 		memset(tmp, i + 63, BO_SIZE);
162 
163 		drm_intel_bo_subdata(scratch_bo, start, len, tmp);
164 
165 		copy_bo(scratch_bo, 1, tiled_staging_bo, 1);
166 		drm_intel_bo_get_subdata(tiled_staging_bo, 0, BO_SIZE,
167 					 compare_tmp);
168 
169 		for (j = 0; j < start; j++) {
170 			igt_assert_f(compare_tmp[j] == val,
171 				     "mismatch at %i, got: %i, expected: %i\n",
172 				     j, tmp[j], val);
173 		}
174 		for (; j < start + len; j++) {
175 			igt_assert_f(compare_tmp[j] == tmp[0],
176 				     "mismatch at %i, got: %i, expected: %i\n",
177 				     j, tmp[j], i);
178 		}
179 		for (; j < BO_SIZE; j++) {
180 			igt_assert_f(compare_tmp[j] == val,
181 				     "mismatch at %i, got: %i, expected: %i\n",
182 				     j, tmp[j], val);
183 		}
184 		drm_intel_gem_bo_unmap_gtt(staging_bo);
185 
186 		igt_progress("partial writes test: ", i, ROUNDS);
187 	}
188 }
189 
test_partial_read_writes(void)190 static void test_partial_read_writes(void)
191 {
192 	int i, j;
193 
194 	for (i = 0; i < ROUNDS; i++) {
195 		int start, len;
196 		int val = i % 256;
197 
198 		blt_bo_fill(staging_bo, scratch_bo, i);
199 
200 		/* partial read */
201 		start = random() % BO_SIZE;
202 		len = random() % (BO_SIZE-start) + 1;
203 
204 		drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
205 		for (j = 0; j < len; j++) {
206 			igt_assert_f(tmp[j] == val,
207 				     "mismatch in read at %i, got: %i, expected: %i\n",
208 				     start + j, tmp[j], val);
209 		}
210 
211 		/* Change contents through gtt to make the pread cachelines
212 		 * stale. */
213 		val = (i + 17) % 256;
214 		blt_bo_fill(staging_bo, scratch_bo, val);
215 
216 		/* partial write */
217 		start = random() % BO_SIZE;
218 		len = random() % (BO_SIZE-start) + 1;
219 
220 		memset(tmp, i + 63, BO_SIZE);
221 
222 		drm_intel_bo_subdata(scratch_bo, start, len, tmp);
223 
224 		copy_bo(scratch_bo, 1, tiled_staging_bo, 1);
225 		drm_intel_bo_get_subdata(tiled_staging_bo, 0, BO_SIZE,
226 					 compare_tmp);
227 
228 		for (j = 0; j < start; j++) {
229 			igt_assert_f(compare_tmp[j] == val,
230 				     "mismatch at %i, got: %i, expected: %i\n",
231 				     j, tmp[j], val);
232 		}
233 		for (; j < start + len; j++) {
234 			igt_assert_f(compare_tmp[j] == tmp[0],
235 				     "mismatch at %i, got: %i, expected: %i\n",
236 				     j, tmp[j], tmp[0]);
237 		}
238 		for (; j < BO_SIZE; j++) {
239 			igt_assert_f(compare_tmp[j] == val,
240 				     "mismatch at %i, got: %i, expected: %i\n",
241 				     j, tmp[j], val);
242 		}
243 		drm_intel_gem_bo_unmap_gtt(staging_bo);
244 
245 		igt_progress("partial read/writes test: ", i, ROUNDS);
246 	}
247 }
248 
known_swizzling(uint32_t handle)249 static bool known_swizzling(uint32_t handle)
250 {
251 	struct drm_i915_gem_get_tiling arg = {
252 		.handle = handle,
253 	};
254 
255 	if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &arg))
256 		return false;
257 
258 	return arg.phys_swizzle_mode == arg.swizzle_mode;
259 }
260 
igt_main
{
	uint32_t tiling_mode = I915_TILING_X;

	igt_skip_on_simulation();

	/* Fixed seed so failing rounds reproduce deterministically. */
	srandom(0xdeadbeef);

	igt_fixture {
		fd = drm_open_driver(DRIVER_INTEL);
		igt_require_gem(fd);

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		/* NOTE(review): bo reuse left disabled (commented out) —
		 * presumably so every allocation gets fresh pages; confirm
		 * before enabling. */
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);
		devid = intel_get_drm_devid(fd);
		batch = intel_batchbuffer_alloc(bufmgr, devid);

		/* X-tiled scratch bo: 1024 px * 4 bpp * (BO_SIZE/4096) rows
		 * == BO_SIZE bytes; the asserts below pin tiling and pitch
		 * so copy_bo's hard-coded geometry stays valid. */
		scratch_bo = drm_intel_bo_alloc_tiled(bufmgr, "scratch bo", 1024,
						      BO_SIZE/4096, 4,
						      &tiling_mode, &scratch_pitch, 0);
		igt_assert(tiling_mode == I915_TILING_X);
		igt_assert(scratch_pitch == 4096);

		/*
		 * As we want to compare our template tiled pattern against
		 * the target bo, we need consistent swizzling on both.
		 */
		igt_require(known_swizzling(scratch_bo->handle));
		staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
		tiled_staging_bo = drm_intel_bo_alloc_tiled(bufmgr, "scratch bo", 1024,
							    BO_SIZE/4096, 4,
							    &tiling_mode,
							    &scratch_pitch, 0);
	}

	igt_subtest("reads")
		test_partial_reads();

	igt_subtest("writes")
		test_partial_writes();

	igt_subtest("writes-after-reads")
		test_partial_read_writes();

	igt_fixture {
		/* Destroying the bufmgr releases the bos allocated above. */
		drm_intel_bufmgr_destroy(bufmgr);

		close(fd);
	}
}
312