/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/* This file implements randomized SDMA texture blit tests. */
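/* The test is reached through the driver's debug flags (e.g. AMD_DEBUG=testdma
 * or, on older Mesa, R600_DEBUG=testdma; the exact flag name is an assumption
 * that depends on the Mesa version). It runs until the process is killed.
 */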

#include "si_pipe.h"
#include "util/rand_xor.h"
#include "util/u_surface.h"

static uint64_t seed_xorshift128plus[2];

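/* rand_xorshift128plus() produces 8 bytes per call; CPU strides are aligned to
 * this so whole rows can be filled with 64-bit random writes. */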
#define RAND_NUM_SIZE 8

/* The GPU blits are emulated on the CPU using these CPU textures. */

struct cpu_texture {
   uint8_t *ptr;          /* CPU copy of the texture contents */
   uint64_t size;         /* total allocation size in bytes */
   uint64_t layer_stride; /* distance between array layers in bytes */
   unsigned stride;       /* distance between rows in bytes */
};

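/* Allocate a CPU-side mirror for a texture described by "templ". */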
static void alloc_cpu_texture(struct cpu_texture *tex, struct pipe_resource *templ)
{
   tex->stride = align(util_format_get_stride(templ->format, templ->width0), RAND_NUM_SIZE);
   tex->layer_stride = (uint64_t)tex->stride * templ->height0;
   tex->size = tex->layer_stride * templ->array_size;
   tex->ptr = malloc(tex->size);
   assert(tex->ptr);
}

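/* Fill the GPU texture and its CPU mirror with identical random data. */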
static void set_random_pixels(struct pipe_context *ctx, struct pipe_resource *tex,
                              struct cpu_texture *cpu)
{
   struct pipe_transfer *t;
   uint8_t *map;
   int x, y, z;

   map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_WRITE, 0, 0, 0, tex->width0, tex->height0,
                              tex->array_size, &t);
   assert(map);

   for (z = 0; z < tex->array_size; z++) {
      for (y = 0; y < tex->height0; y++) {
         uint64_t *ptr = (uint64_t *)(map + t->layer_stride * z + t->stride * y);
         uint64_t *ptr_cpu = (uint64_t *)(cpu->ptr + cpu->layer_stride * z + cpu->stride * y);
         unsigned size = cpu->stride / RAND_NUM_SIZE;

         assert(t->stride % RAND_NUM_SIZE == 0);
         assert(cpu->stride % RAND_NUM_SIZE == 0);

         for (x = 0; x < size; x++) {
            *ptr++ = *ptr_cpu++ = rand_xorshift128plus(seed_xorshift128plus);
         }
      }
   }

   pipe_transfer_unmap(ctx, t);
}

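/* Map the GPU texture and compare it row by row against the CPU reference.
 * Only the valid bytes of each row are compared, not the alignment padding.
 */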
static bool compare_textures(struct pipe_context *ctx, struct pipe_resource *tex,
                             struct cpu_texture *cpu)
{
   struct pipe_transfer *t;
   uint8_t *map;
   int y, z;
   bool pass = true;
   unsigned stride = util_format_get_stride(tex->format, tex->width0);

   map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_MAP_READ, 0, 0, 0, tex->width0, tex->height0,
                              tex->array_size, &t);
   assert(map);

   for (z = 0; z < tex->array_size; z++) {
      for (y = 0; y < tex->height0; y++) {
         uint8_t *ptr = map + t->layer_stride * z + t->stride * y;
         uint8_t *cpu_ptr = cpu->ptr + cpu->layer_stride * z + cpu->stride * y;

         if (memcmp(ptr, cpu_ptr, stride)) {
            pass = false;
            goto done;
         }
      }
   }
done:
   pipe_transfer_unmap(ctx, t);
   return pass;
}

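/* Pick a random test format, covering block sizes from 1 to 16 bytes as well
 * as one subsampled format. */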
static enum pipe_format choose_format(void)
{
   enum pipe_format formats[] = {
      PIPE_FORMAT_R8_UINT,     PIPE_FORMAT_R16_UINT,          PIPE_FORMAT_R32_UINT,
      PIPE_FORMAT_R32G32_UINT, PIPE_FORMAT_R32G32B32A32_UINT, PIPE_FORMAT_G8R8_B8R8_UNORM,
   };
   return formats[rand() % ARRAY_SIZE(formats)];
}

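/* Return a human-readable name of the surface's tiling mode for the test log. */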
static const char *array_mode_to_string(struct si_screen *sscreen, struct radeon_surf *surf)
{
   if (sscreen->info.chip_class >= GFX9) {
      switch (surf->u.gfx9.surf.swizzle_mode) {
      case 0:
         return "  LINEAR";
      case 21:
         return " 4KB_S_X";
      case 22:
         return " 4KB_D_X";
      case 25:
         return "64KB_S_X";
      case 26:
         return "64KB_D_X";
      default:
         printf("Unhandled swizzle mode = %u\n", surf->u.gfx9.surf.swizzle_mode);
         return " UNKNOWN";
      }
   } else {
      switch (surf->u.legacy.level[0].mode) {
      case RADEON_SURF_MODE_LINEAR_ALIGNED:
         return "LINEAR_ALIGNED";
      case RADEON_SURF_MODE_1D:
         return "1D_TILED_THIN1";
      case RADEON_SURF_MODE_2D:
         return "2D_TILED_THIN1";
      default:
         assert(0);
         return " UNKNOWN";
      }
   }
}

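/* Pick an upper bound for the randomized texture dimensions. The distribution
 * is biased so that maximum, 1D-tiled (small), and common sizes all get
 * regular coverage. */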
static unsigned generate_max_tex_side(unsigned max_tex_side)
{
   switch (rand() % 4) {
   case 0:
      /* Try to hit large sizes in 1/4 of the cases. */
      return max_tex_side;
   case 1:
      /* Try to hit 1D tiling in 1/4 of the cases. */
      return 128;
   default:
      /* Try to hit common sizes in 2/4 of the cases. */
      return 2048;
   }
}

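/* Entry point: run randomized GPU blits and verify each result against the
 * CPU emulation, then exit the process. */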
void si_test_dma(struct si_screen *sscreen)
{
   struct pipe_screen *screen = &sscreen->b;
   struct pipe_context *ctx = screen->context_create(screen, NULL, 0);
   struct si_context *sctx = (struct si_context *)ctx;
   uint64_t max_alloc_size;
   unsigned i, iterations, num_partial_copies, max_tex_side;
   unsigned num_pass = 0, num_fail = 0;

   max_tex_side = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_2D_SIZE);

   /* Max 128 MB allowed for both textures. */
   max_alloc_size = 128 * 1024 * 1024;

   /* the seed for random test parameters */
   srand(0x9b47d95b);
   /* the seed for random pixel data */
   s_rand_xorshift128plus(seed_xorshift128plus, false);

   iterations = 1000000000; /* just kill it when you are bored */
   num_partial_copies = 30;

   /* These parameters are randomly generated per test:
    * - whether to do one whole-surface copy or N partial copies per test
    * - which tiling modes to use (LINEAR_ALIGNED, 1D, 2D)
    * - which texture dimensions to use
    * - whether to use VRAM (all tiling modes) and GTT (staging, linear
    *   only) allocations
    * - random initial pixels in src
    * - generate random subrectangle copies for partial blits
    */
   for (i = 0; i < iterations; i++) {
      struct pipe_resource tsrc = {}, tdst = {}, *src, *dst;
      struct si_texture *sdst;
      struct si_texture *ssrc;
      struct cpu_texture src_cpu, dst_cpu;
      unsigned max_width, max_height, max_depth, j, num;
      unsigned gfx_blits = 0, dma_blits = 0, cs_blits = 0, max_tex_side_gen;
      unsigned max_tex_layers;
      bool pass;
      bool do_partial_copies = rand() & 1;

      /* generate a random test case */
      tsrc.target = tdst.target = PIPE_TEXTURE_2D_ARRAY;
      tsrc.depth0 = tdst.depth0 = 1;

      tsrc.format = tdst.format = choose_format();

      max_tex_side_gen = generate_max_tex_side(max_tex_side);
      max_tex_layers = rand() % 4 ? 1 : 5;

      tsrc.width0 = (rand() % max_tex_side_gen) + 1;
      tsrc.height0 = (rand() % max_tex_side_gen) + 1;
      tsrc.array_size = (rand() % max_tex_layers) + 1;

      /* The subsampled format requires an even width. */
      if (tsrc.format == PIPE_FORMAT_G8R8_B8R8_UNORM)
         tsrc.width0 = align(tsrc.width0, 2);

      /* Have a 1/4 chance of getting power-of-two dimensions. */
      if (rand() % 4 == 0) {
         tsrc.width0 = util_next_power_of_two(tsrc.width0);
         tsrc.height0 = util_next_power_of_two(tsrc.height0);
      }

      if (!do_partial_copies) {
         /* whole-surface copies only, same dimensions */
         tdst = tsrc;
      } else {
         max_tex_side_gen = generate_max_tex_side(max_tex_side);
         max_tex_layers = rand() % 4 ? 1 : 5;

         /* many partial copies, dimensions can be different */
         tdst.width0 = (rand() % max_tex_side_gen) + 1;
         tdst.height0 = (rand() % max_tex_side_gen) + 1;
         tdst.array_size = (rand() % max_tex_layers) + 1;

         /* Have a 1/4 chance of getting power-of-two dimensions. */
         if (rand() % 4 == 0) {
            tdst.width0 = util_next_power_of_two(tdst.width0);
            tdst.height0 = util_next_power_of_two(tdst.height0);
         }
      }

      /* check texture sizes */
      if ((uint64_t)util_format_get_nblocks(tsrc.format, tsrc.width0, tsrc.height0) *
             tsrc.array_size * util_format_get_blocksize(tsrc.format) +
          (uint64_t)util_format_get_nblocks(tdst.format, tdst.width0, tdst.height0) *
             tdst.array_size * util_format_get_blocksize(tdst.format) >
          max_alloc_size) {
         /* too large, try again */
         i--;
         continue;
      }

      /* 3/4 of cases: VRAM, where the tiling mode depends on the dimensions;
       * 1/4 of cases: GTT (staging), which is linear only
       */
      tsrc.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;
      tdst.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;

      /* Allocate textures (both the GPU and CPU copies).
       * The CPU will emulate what the GPU should be doing.
       */
      src = screen->resource_create(screen, &tsrc);
      dst = screen->resource_create(screen, &tdst);
      assert(src);
      assert(dst);
      sdst = (struct si_texture *)dst;
      ssrc = (struct si_texture *)src;
      alloc_cpu_texture(&src_cpu, &tsrc);
      alloc_cpu_texture(&dst_cpu, &tdst);

      printf("%4u: dst = (%5u x %5u x %u, %s), "
             " src = (%5u x %5u x %u, %s), format = %s, ",
             i, tdst.width0, tdst.height0, tdst.array_size,
             array_mode_to_string(sscreen, &sdst->surface), tsrc.width0, tsrc.height0,
             tsrc.array_size, array_mode_to_string(sscreen, &ssrc->surface),
             util_format_description(tsrc.format)->name);
      fflush(stdout);

      /* set src pixels */
      set_random_pixels(ctx, src, &src_cpu);

      /* clear dst pixels */
      uint32_t zero = 0;
      si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4, SI_COHERENCY_SHADER, false);
      memset(dst_cpu.ptr, 0, dst_cpu.layer_stride * tdst.array_size);

      /* preparation */
      max_width = MIN2(tsrc.width0, tdst.width0);
      max_height = MIN2(tsrc.height0, tdst.height0);
      max_depth = MIN2(tsrc.array_size, tdst.array_size);

      num = do_partial_copies ? num_partial_copies : 1;
      for (j = 0; j < num; j++) {
         int width, height, depth;
         int srcx, srcy, srcz, dstx, dsty, dstz;
         struct pipe_box box;
         unsigned old_num_draw_calls = sctx->num_draw_calls;
         unsigned old_num_dma_calls = sctx->num_dma_calls;
         unsigned old_num_cs_calls = sctx->num_compute_calls;

         if (!do_partial_copies) {
            /* copy whole src to dst */
            width = max_width;
            height = max_height;
            depth = max_depth;

            srcx = srcy = srcz = dstx = dsty = dstz = 0;
         } else {
            /* random sub-rectangle copies from src to dst */
            depth = (rand() % max_depth) + 1;
            srcz = rand() % (tsrc.array_size - depth + 1);
            dstz = rand() % (tdst.array_size - depth + 1);

            /* special code path to hit the tiled partial copies */
            if (!ssrc->surface.is_linear && !sdst->surface.is_linear && rand() & 1) {
               if (max_width < 8 || max_height < 8)
                  continue;
               width = ((rand() % (max_width / 8)) + 1) * 8;
               height = ((rand() % (max_height / 8)) + 1) * 8;

               srcx = rand() % (tsrc.width0 - width + 1) & ~0x7;
               srcy = rand() % (tsrc.height0 - height + 1) & ~0x7;

               dstx = rand() % (tdst.width0 - width + 1) & ~0x7;
               dsty = rand() % (tdst.height0 - height + 1) & ~0x7;
            } else {
               /* just make sure that it doesn't divide by zero */
               assert(max_width > 0 && max_height > 0);

               width = (rand() % max_width) + 1;
               height = (rand() % max_height) + 1;

               srcx = rand() % (tsrc.width0 - width + 1);
               srcy = rand() % (tsrc.height0 - height + 1);

               dstx = rand() % (tdst.width0 - width + 1);
               dsty = rand() % (tdst.height0 - height + 1);
            }
            /* special code path to hit out-of-bounds reads in the
             * linear-to-tiled (L2T) copy: anchor the source at the origin
             */
            if (ssrc->surface.is_linear && !sdst->surface.is_linear && rand() % 4 == 0) {
               srcx = 0;
               srcy = 0;
               srcz = 0;
            }
         }

         /* GPU copy */
         u_box_3d(srcx, srcy, srcz, width, height, depth, &box);
         sctx->dma_copy(ctx, dst, 0, dstx, dsty, dstz, src, 0, &box);

         /* See which engine was used. */
         gfx_blits += sctx->num_draw_calls > old_num_draw_calls;
         dma_blits += sctx->num_dma_calls > old_num_dma_calls;
         cs_blits += sctx->num_compute_calls > old_num_cs_calls;

         /* CPU copy */
         util_copy_box(dst_cpu.ptr, tdst.format, dst_cpu.stride, dst_cpu.layer_stride, dstx, dsty,
                       dstz, width, height, depth, src_cpu.ptr, src_cpu.stride,
                       src_cpu.layer_stride, srcx, srcy, srcz);
      }

      pass = compare_textures(ctx, dst, &dst_cpu);
      if (pass)
         num_pass++;
      else
         num_fail++;

      printf("BLITs: GFX = %2u, DMA = %2u, CS = %2u, %s [%u/%u]\n", gfx_blits, dma_blits, cs_blits,
             pass ? "pass" : "fail", num_pass, num_pass + num_fail);
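
      /* Together with the header printed at the start of the iteration, each
       * test yields one log line, e.g. (illustrative values only):
       *   17: dst = (  640 x   480 x 1, 64KB_S_X),  src = ( 1024 x   768 x 2,   LINEAR), format = PIPE_FORMAT_R32_UINT, BLITs: GFX =  0, DMA =  1, CS =  0, pass [17/18]
       */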

      /* cleanup */
      pipe_resource_reference(&src, NULL);
      pipe_resource_reference(&dst, NULL);
      free(src_cpu.ptr);
      free(dst_cpu.ptr);
   }

   ctx->destroy(ctx);
   exit(0);
}