1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel.vetter@ffwll.ch>
25 *
26 */
27
28 #include "igt.h"
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <string.h>
32 #include <fcntl.h>
33 #include <inttypes.h>
34 #include <errno.h>
35 #include <sys/stat.h>
36 #include <sys/time.h>
37
38 #include <drm.h>
39
40
IGT_TEST_DESCRIPTION("Test pwrite/pread consistency when touching partial"
		     " cachelines.");

/*
 * Testcase: pwrite/pread consistency when touching partial cachelines
 *
 * Some fancy new pwrite/pread optimizations clflush in-line while
 * reading/writing. Check whether all required clflushes happen.
 *
 */
51
/* Shared state, initialized once in igt_main's first fixture. */
static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch;

drm_intel_bo *scratch_bo;	/* object exercised with pread/pwrite */
drm_intel_bo *staging_bo;	/* filled via GTT mapping, blitted to/from scratch_bo */
#define BO_SIZE (4*4096)	/* four pages */
uint32_t devid;
int fd;				/* DRM device fd */
60
/*
 * Copy the full BO_SIZE bytes from src to dst with the blitter, so the
 * data transfer happens on the GPU and never goes through CPU caches.
 * Layout: 32bpp pixels, 4096-byte pitch, i.e. BO_SIZE/4096 rows of
 * 1024 pixels each.  Flushes the batch so the blit is submitted.
 */
static void
copy_bo(drm_intel_bo *src, drm_intel_bo *dst)
{
	BLIT_COPY_BATCH_START(0);
	OUT_BATCH((3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  4096); /* dst pitch in bytes */
	OUT_BATCH(0 << 16 | 0); /* dst x1, y1 */
	OUT_BATCH((BO_SIZE/4096) << 16 | 1024); /* dst y2, x2 */
	OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH(0 << 16 | 0); /* src x1, y1 */
	OUT_BATCH(4096); /* src pitch in bytes */
	OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);
}
78
79 static void
blt_bo_fill(drm_intel_bo * tmp_bo,drm_intel_bo * bo,uint8_t val)80 blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, uint8_t val)
81 {
82 uint8_t *gtt_ptr;
83 int i;
84
85 do_or_die(drm_intel_gem_bo_map_gtt(tmp_bo));
86 gtt_ptr = tmp_bo->virtual;
87
88 for (i = 0; i < BO_SIZE; i++)
89 gtt_ptr[i] = val;
90
91 drm_intel_gem_bo_unmap_gtt(tmp_bo);
92
93 igt_drop_caches_set(fd, DROP_BOUND);
94
95 copy_bo(tmp_bo, bo);
96 }
97
#define MAX_BLT_SIZE 128	/* NOTE(review): not referenced in this file — confirm before removing */
#define ROUNDS 1000		/* iterations per subtest */
uint8_t tmp[BO_SIZE];		/* CPU-side staging buffer for pread/pwrite payloads */
101
get_range(int * start,int * len)102 static void get_range(int *start, int *len)
103 {
104 *start = random() % (BO_SIZE - 1);
105 *len = random() % (BO_SIZE - *start - 1) + 1;
106 }
107
test_partial_reads(void)108 static void test_partial_reads(void)
109 {
110 int i, j;
111
112 igt_info("checking partial reads\n");
113 for (i = 0; i < ROUNDS; i++) {
114 uint8_t val = i;
115 int start, len;
116
117 blt_bo_fill(staging_bo, scratch_bo, val);
118
119 get_range(&start, &len);
120 do_or_die(drm_intel_bo_get_subdata(scratch_bo, start, len, tmp));
121 for (j = 0; j < len; j++) {
122 igt_assert_f(tmp[j] == val,
123 "mismatch at %i [%i + %i], got: %i, expected: %i\n",
124 j, start, len, tmp[j], val);
125 }
126
127 igt_progress("partial reads test: ", i, ROUNDS);
128 }
129 }
130
test_partial_writes(void)131 static void test_partial_writes(void)
132 {
133 int i, j;
134 uint8_t *gtt_ptr;
135
136 igt_info("checking partial writes\n");
137 for (i = 0; i < ROUNDS; i++) {
138 uint8_t val = i;
139 int start, len;
140
141 blt_bo_fill(staging_bo, scratch_bo, val);
142
143 memset(tmp, i + 63, BO_SIZE);
144
145 get_range(&start, &len);
146 drm_intel_bo_subdata(scratch_bo, start, len, tmp);
147
148 copy_bo(scratch_bo, staging_bo);
149 drm_intel_gem_bo_map_gtt(staging_bo);
150 gtt_ptr = staging_bo->virtual;
151
152 for (j = 0; j < start; j++) {
153 igt_assert_f(gtt_ptr[j] == val,
154 "mismatch at %i (start=%i), got: %i, expected: %i\n",
155 j, start, tmp[j], val);
156 }
157 for (; j < start + len; j++) {
158 igt_assert_f(gtt_ptr[j] == tmp[0],
159 "mismatch at %i (%i/%i), got: %i, expected: %i\n",
160 j, j-start, len, tmp[j], i);
161 }
162 for (; j < BO_SIZE; j++) {
163 igt_assert_f(gtt_ptr[j] == val,
164 "mismatch at %i (end=%i), got: %i, expected: %i\n",
165 j, start+len, tmp[j], val);
166 }
167 drm_intel_gem_bo_unmap_gtt(staging_bo);
168
169 igt_progress("partial writes test: ", i, ROUNDS);
170 }
171 }
172
test_partial_read_writes(void)173 static void test_partial_read_writes(void)
174 {
175 int i, j;
176 uint8_t *gtt_ptr;
177
178 igt_info("checking partial writes after partial reads\n");
179 for (i = 0; i < ROUNDS; i++) {
180 uint8_t val = i;
181 int start, len;
182
183 blt_bo_fill(staging_bo, scratch_bo, val);
184
185 /* partial read */
186 get_range(&start, &len);
187 drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
188 for (j = 0; j < len; j++) {
189 igt_assert_f(tmp[j] == val,
190 "mismatch in read at %i [%i + %i], got: %i, expected: %i\n",
191 j, start, len, tmp[j], val);
192 }
193
194 /* Change contents through gtt to make the pread cachelines
195 * stale. */
196 val += 17;
197 blt_bo_fill(staging_bo, scratch_bo, val);
198
199 /* partial write */
200 memset(tmp, i + 63, BO_SIZE);
201
202 get_range(&start, &len);
203 drm_intel_bo_subdata(scratch_bo, start, len, tmp);
204
205 copy_bo(scratch_bo, staging_bo);
206 do_or_die(drm_intel_gem_bo_map_gtt(staging_bo));
207 gtt_ptr = staging_bo->virtual;
208
209 for (j = 0; j < start; j++) {
210 igt_assert_f(gtt_ptr[j] == val,
211 "mismatch at %i (start=%i), got: %i, expected: %i\n",
212 j, start, tmp[j], val);
213 }
214 for (; j < start + len; j++) {
215 igt_assert_f(gtt_ptr[j] == tmp[0],
216 "mismatch at %i (%i/%i), got: %i, expected: %i\n",
217 j, j - start, len, tmp[j], tmp[0]);
218 }
219 for (; j < BO_SIZE; j++) {
220 igt_assert_f(gtt_ptr[j] == val,
221 "mismatch at %i (end=%i), got: %i, expected: %i\n",
222 j, start + len, tmp[j], val);
223 }
224 drm_intel_gem_bo_unmap_gtt(staging_bo);
225
226 igt_progress("partial read/writes test: ", i, ROUNDS);
227 }
228 }
229
do_tests(int cache_level,const char * suffix)230 static void do_tests(int cache_level, const char *suffix)
231 {
232 igt_fixture {
233 if (cache_level != -1)
234 gem_set_caching(fd, scratch_bo->handle, cache_level);
235 }
236
237 igt_subtest_f("reads%s", suffix)
238 test_partial_reads();
239
240 igt_subtest_f("write%s", suffix)
241 test_partial_writes();
242
243 igt_subtest_f("writes-after-reads%s", suffix)
244 test_partial_read_writes();
245 }
246
igt_main
{
	/* Fixed seed so failing ranges are reproducible across runs. */
	srandom(0xdeadbeef);

	igt_skip_on_simulation();

	igt_fixture {
		fd = drm_open_driver(DRIVER_INTEL);
		igt_require_gem(fd);

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		devid = intel_get_drm_devid(fd);
		batch = intel_batchbuffer_alloc(bufmgr, devid);

		/* Buffers shared by all subtests; BO_SIZE spans several
		 * pages so random ranges can cross page boundaries. */
		scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
		staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
	}

	/* Default caching first, ... */
	do_tests(-1, "");

	/* ... then repeat the tests using different levels of snooping. */
	do_tests(0, "-uncached");
	do_tests(1, "-snoop");
	do_tests(2, "-display");

	igt_fixture {
		drm_intel_bufmgr_destroy(bufmgr);

		close(fd);
	}
}
279