/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Damien Lespiau <damien.lespiau@intel.com>
 */

/*
 * This file is a basic test for the render_copy() function, a very simple
 * workload for the 3D engine.
 */

32 #include "igt.h"
33 #include "igt_x86.h"
34 #include <stdbool.h>
35 #include <unistd.h>
36 #include <cairo.h>
37 #include <stdlib.h>
38 #include <sys/ioctl.h>
39 #include <stdio.h>
40 #include <string.h>
41 #include <fcntl.h>
42 #include <inttypes.h>
43 #include <errno.h>
44 #include <sys/stat.h>
45 #include <sys/time.h>
46 
47 #include <drm.h>
48 
49 #include "intel_bufmgr.h"
50 
51 IGT_TEST_DESCRIPTION("Basic test for the render_copy() function.");
52 
53 #define WIDTH 512
54 #define HEIGHT 512
55 
56 typedef struct {
57 	int drm_fd;
58 	uint32_t devid;
59 	drm_intel_bufmgr *bufmgr;
60 	struct intel_batchbuffer *batch;
61 	igt_render_copyfunc_t render_copy;
62 } data_t;
63 static int opt_dump_png = false;
64 static int check_all_pixels = false;
65 
/*
 * Build "<subtest>_<filename>" for dump files so output from different
 * subtests does not overwrite each other.
 *
 * Returns a pointer to a static buffer, only valid until the next call.
 * That is fine here: the tests are single-threaded and every caller
 * consumes the name immediately.
 */
static const char *make_filename(const char *filename)
{
	/*
	 * The previous 64-byte buffer could silently truncate the
	 * combination of a long subtest name (e.g.
	 * "yf-tiled-ccs-to-yf-tiled") and a dump file name; 256 bytes
	 * gives comfortable headroom while snprintf() still guards
	 * against overflow.
	 */
	static char buf[256];

	snprintf(buf, sizeof(buf), "%s_%s", igt_subtest_name(), filename);

	return buf;
}
74 
/*
 * Return the address of pixel (x,y) inside a Yf-tiled surface mapped at
 * @ptr.  @stride is in bytes, @cpp is bytes per pixel.
 *
 * Within a 4k Yf tile the byte swizzling pattern is
 *	msb......lsb
 *	xyxyxyyyxxxx
 * and the 4k tiles themselves are laid out in row-major order.
 */
static void *yf_ptr(void *ptr,
		    unsigned int x, unsigned int y,
		    unsigned int stride, unsigned int cpp)
{
	const int tile_size = 4 * 1024;
	const int tile_width = 128;	/* bytes per tile row */
	int row_size = (stride / tile_width) * tile_size;
	unsigned int offset;

	x *= cpp; /* convert to byte offset */

	offset  = x & 0xf;			/* 4x1 pixels(32bpp) = 16B */
	offset += (y & 0x3) << 4;		/* 4x4 pixels = 64B */
	offset += ((y >> 2) & 1) << 6;		/* 1x2 64B blocks */
	offset += ((x >> 4) & 1) << 7;		/* 2x2 64B blocks = 256B block */
	offset += ((y >> 3) & 1) << 8;		/* 2x1 256B blocks */
	offset += ((x >> 5) & 1) << 9;		/* 2x2 256B blocks */
	offset += ((y >> 4) & 1) << 10;		/* 4x2 256B blocks */
	offset += ((x >> 6) & 1) << 11;		/* 4x4 256B blocks = 4k tile */
	offset += (x >> 7) * tile_size;		/* row of tiles */
	offset += (y >> 5) * row_size;		/* rows of tile rows */

	return (char *)ptr + offset;
}
104 
/*
 * Upload a linear pixel array into a Yf-tiled bo through a CPU mmap,
 * swizzling each pixel address with yf_ptr().
 */
static void copy_linear_to_yf(data_t *data, struct igt_buf *buf,
			      const uint32_t *linear)
{
	int w = igt_buf_width(buf);
	int h = igt_buf_height(buf);
	void *map;
	int x, y;

	gem_set_domain(data->drm_fd, buf->bo->handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	map = gem_mmap__cpu(data->drm_fd, buf->bo->handle, 0,
			    buf->bo->size, PROT_READ | PROT_WRITE);

	for (y = 0; y < h; y++) {
		const uint32_t *src_row = linear + (size_t)y * w;

		for (x = 0; x < w; x++)
			*(uint32_t *)yf_ptr(map, x, y,
					    buf->stride, buf->bpp / 8) =
				src_row[x];
	}

	munmap(map, buf->bo->size);
}
128 
/*
 * Read back a Yf-tiled bo into a linear pixel array through a CPU mmap,
 * undoing the tiling with yf_ptr().
 */
static void copy_yf_to_linear(data_t *data, struct igt_buf *buf,
			      uint32_t *linear)
{
	int w = igt_buf_width(buf);
	int h = igt_buf_height(buf);
	void *map;
	int x, y;

	gem_set_domain(data->drm_fd, buf->bo->handle,
		       I915_GEM_DOMAIN_CPU, 0);
	map = gem_mmap__cpu(data->drm_fd, buf->bo->handle, 0,
			    buf->bo->size, PROT_READ);

	for (y = 0; y < h; y++) {
		uint32_t *dst_row = linear + (size_t)y * w;

		for (x = 0; x < w; x++)
			dst_row[x] = *(const uint32_t *)yf_ptr(map, x, y,
							       buf->stride,
							       buf->bpp / 8);
	}

	munmap(map, buf->bo->size);
}
152 
/*
 * Upload a linear pixel array through a GTT mapping; the GTT applies
 * the bo's fence tiling on access, so a plain memcpy of the whole bo
 * is sufficient.
 */
static void copy_linear_to_gtt(data_t *data, struct igt_buf *buf,
			       const uint32_t *linear)
{
	void *map = NULL;

	gem_set_domain(data->drm_fd, buf->bo->handle,
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	map = gem_mmap__gtt(data->drm_fd, buf->bo->handle,
			    buf->bo->size, PROT_READ | PROT_WRITE);

	memcpy(map, linear, buf->bo->size);
	munmap(map, buf->bo->size);
}
168 
/*
 * Read the whole bo back through a GTT mapping into @linear.  The
 * igt_memcpy_from_wc() helper is used because reads from the
 * (write-combining) GTT mapping are slow with a plain memcpy.
 */
static void copy_gtt_to_linear(data_t *data, struct igt_buf *buf,
			       uint32_t *linear)
{
	void *map = NULL;

	gem_set_domain(data->drm_fd, buf->bo->handle,
		       I915_GEM_DOMAIN_GTT, 0);
	map = gem_mmap__gtt(data->drm_fd, buf->bo->handle,
			    buf->bo->size, PROT_READ);

	igt_memcpy_from_wc(linear, map, buf->bo->size);
	munmap(map, buf->bo->size);
}
184 
/*
 * Return a freshly allocated linear (detiled) copy of @buf's pixels.
 * The caller owns the returned buffer and must free() it.
 */
static void *linear_copy(data_t *data, struct igt_buf *buf)
{
	void *linear = NULL;

	/* 16B alignment allows to potentially make use of SSE4 for copying */
	igt_assert_eq(posix_memalign(&linear, 16, buf->bo->size), 0);

	/* Yf has no GTT/fence detiling, so it needs the manual path. */
	if (buf->tiling != I915_TILING_Yf)
		copy_gtt_to_linear(data, buf, linear);
	else
		copy_yf_to_linear(data, buf, linear);

	return linear;
}
199 
/*
 * Dump the (detiled) contents of @buf as an RGB24 PNG, for debugging
 * with the -d option.
 */
static void scratch_buf_write_to_png(data_t *data, struct igt_buf *buf,
				     const char *filename)
{
	void *pixels = linear_copy(data, buf);
	cairo_surface_t *surface;

	surface = cairo_image_surface_create_for_data(pixels,
						      CAIRO_FORMAT_RGB24,
						      igt_buf_width(buf),
						      igt_buf_height(buf),
						      buf->stride);
	igt_assert(cairo_surface_write_to_png(surface,
					      make_filename(filename)) ==
		   CAIRO_STATUS_SUCCESS);
	cairo_surface_destroy(surface);

	free(pixels);
}
220 
/* AUX (CCS) surface width: 128 bytes per 1024 pixels of main surface
 * width, rounded up. */
static int scratch_buf_aux_width(const struct igt_buf *buf)
{
	int width = igt_buf_width(buf);

	return (width + 1024 - 1) / 1024 * 128;
}
225 
/* AUX (CCS) surface height: 32 rows per 512 rows of main surface,
 * rounded up. */
static int scratch_buf_aux_height(const struct igt_buf *buf)
{
	int height = igt_buf_height(buf);

	return (height + 512 - 1) / 512 * 32;
}
230 
/*
 * Return a freshly allocated copy of just the AUX (CCS) part of @buf's
 * bo, which starts at buf->aux.offset.  The caller must free() it.
 */
static void *linear_copy_aux(data_t *data, struct igt_buf *buf)
{
	int aux_size = scratch_buf_aux_width(buf) *
		scratch_buf_aux_height(buf);
	void *linear = NULL;
	void *map;

	/* 16B alignment to allow an SSE4-accelerated copy. */
	igt_assert_eq(posix_memalign(&linear, 16, aux_size), 0);

	gem_set_domain(data->drm_fd, buf->bo->handle,
		       I915_GEM_DOMAIN_GTT, 0);
	map = gem_mmap__gtt(data->drm_fd, buf->bo->handle,
			    buf->bo->size, PROT_READ);

	igt_memcpy_from_wc(linear, map + buf->aux.offset, aux_size);
	munmap(map, buf->bo->size);

	return linear;
}
251 
/*
 * Dump the AUX (CCS) surface of @buf as an A8 PNG, for debugging with
 * the -d option.
 */
static void scratch_buf_aux_write_to_png(data_t *data,
					 struct igt_buf *buf,
					 const char *filename)
{
	void *aux = linear_copy_aux(data, buf);
	cairo_surface_t *surface;

	surface = cairo_image_surface_create_for_data(aux,
						      CAIRO_FORMAT_A8,
						      scratch_buf_aux_width(buf),
						      scratch_buf_aux_height(buf),
						      buf->aux.stride);
	igt_assert(cairo_surface_write_to_png(surface,
					      make_filename(filename)) ==
		   CAIRO_STATUS_SUCCESS);
	cairo_surface_destroy(surface);

	free(aux);
}
273 
/*
 * Draw a four-corner mesh gradient rectangle (w x h at x,y) into @buf,
 * clipped to the rectangle (cw x ch at cx,cy).  @use_alternate_colors
 * selects one of two corner palettes so source and destination buffers
 * can be told apart in the pixel checks.
 */
static void scratch_buf_draw_pattern(data_t *data, struct igt_buf *buf,
				     int x, int y, int w, int h,
				     int cx, int cy, int cw, int ch,
				     bool use_alternate_colors)
{
	cairo_surface_t *surface;
	cairo_pattern_t *pat;
	cairo_t *cr;
	void *linear;

	/* Render on a linear CPU copy, then upload the result. */
	linear = linear_copy(data, buf);

	surface = cairo_image_surface_create_for_data(linear,
						      CAIRO_FORMAT_RGB24,
						      igt_buf_width(buf),
						      igt_buf_height(buf),
						      buf->stride);

	cr = cairo_create(surface);

	/* Restrict all drawing to the clip rectangle. */
	cairo_rectangle(cr, cx, cy, cw, ch);
	cairo_clip(cr);

	/* One mesh patch over the pattern rectangle, one color per corner. */
	pat = cairo_pattern_create_mesh();
	cairo_mesh_pattern_begin_patch(pat);
	cairo_mesh_pattern_move_to(pat, x,   y);
	cairo_mesh_pattern_line_to(pat, x+w, y);
	cairo_mesh_pattern_line_to(pat, x+w, y+h);
	cairo_mesh_pattern_line_to(pat, x,   y+h);
	if (use_alternate_colors) {
		cairo_mesh_pattern_set_corner_color_rgb(pat, 0, 0.0, 1.0, 1.0);
		cairo_mesh_pattern_set_corner_color_rgb(pat, 1, 1.0, 0.0, 1.0);
		cairo_mesh_pattern_set_corner_color_rgb(pat, 2, 1.0, 1.0, 0.0);
		cairo_mesh_pattern_set_corner_color_rgb(pat, 3, 0.0, 0.0, 0.0);
	} else {
		cairo_mesh_pattern_set_corner_color_rgb(pat, 0, 1.0, 0.0, 0.0);
		cairo_mesh_pattern_set_corner_color_rgb(pat, 1, 0.0, 1.0, 0.0);
		cairo_mesh_pattern_set_corner_color_rgb(pat, 2, 0.0, 0.0, 1.0);
		cairo_mesh_pattern_set_corner_color_rgb(pat, 3, 1.0, 1.0, 1.0);
	}
	cairo_mesh_pattern_end_patch(pat);

	cairo_rectangle(cr, x, y, w, h);
	cairo_set_source(cr, pat);
	cairo_fill(cr);
	cairo_pattern_destroy(pat);

	cairo_destroy(cr);

	cairo_surface_destroy(surface);

	/* Upload the rendered pixels via the tiling-appropriate path. */
	if (buf->tiling == I915_TILING_Yf)
		copy_linear_to_yf(data, buf, linear);
	else
		copy_linear_to_gtt(data, buf, linear);

	free(linear);
}
332 
/*
 * CPU reference copy: copy the @w x @h rectangle at (@sx,@sy) in @src
 * into @dst at (@dx,@dy), clipping the rectangle against the surface
 * bounds.  This mimics what render_copy() is expected to do and is
 * used to build the reference image the GPU result is compared to.
 */
static void
scratch_buf_copy(data_t *data,
		 struct igt_buf *src, int sx, int sy, int w, int h,
		 struct igt_buf *dst, int dx, int dy)
{
	int width = igt_buf_width(dst);
	int height  = igt_buf_height(dst);
	uint32_t *linear_dst;

	/* The direct pixel addressing below requires identical layout. */
	igt_assert_eq(igt_buf_width(dst), igt_buf_width(src));
	igt_assert_eq(igt_buf_height(dst), igt_buf_height(src));
	igt_assert_eq(dst->bo->size, src->bo->size);
	igt_assert_eq(dst->bpp, src->bpp);

	/* Clip so the rectangle fits both surfaces. */
	w = min(w, width - sx);
	w = min(w, width - dx);

	h = min(h, height - sy);
	h = min(h, height - dy);

	/* NOTE(review): dst is indexed as width pixels per row, i.e. it
	 * assumes stride == width * 4 and GTT detiling — in this file
	 * dst is always the linear reference buffer; verify if reused. */
	gem_set_domain(data->drm_fd, dst->bo->handle,
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	linear_dst = gem_mmap__gtt(data->drm_fd, dst->bo->handle,
				   dst->bo->size, PROT_WRITE);

	if (src->tiling == I915_TILING_Yf) {
		void *map;

		/* Yf has no GTT detiling: swizzle manually via yf_ptr(). */
		gem_set_domain(data->drm_fd, src->bo->handle,
			       I915_GEM_DOMAIN_CPU, 0);
		map = gem_mmap__cpu(data->drm_fd, src->bo->handle, 0,
				    src->bo->size, PROT_READ);

		for (int y = 0; y < h; y++) {
			for (int x = 0; x < w; x++) {
				const uint32_t *ptr = yf_ptr(map, sx+x, sy+y,
							     src->stride,
							     src->bpp / 8);

				linear_dst[(dy+y) * width + dx+x] = *ptr;
			}
		}

		munmap(map, src->bo->size);
	} else {
		uint32_t *linear_src;

		gem_set_domain(data->drm_fd, src->bo->handle,
			       I915_GEM_DOMAIN_GTT, 0);

		linear_src = gem_mmap__gtt(data->drm_fd, src->bo->handle,
					   src->bo->size, PROT_READ);

		/* Row-by-row copy out of the write-combining mapping. */
		for (int y = 0; y < h; y++) {
			igt_memcpy_from_wc(&linear_dst[(dy+y) * width + dx],
					   &linear_src[(sy+y) * width + sx],
					   w * (src->bpp / 8));
		}

		munmap(linear_src, src->bo->size);
	}

	munmap(linear_dst, dst->bo->size);
}
397 
/*
 * Allocate a @width x @height 32bpp bo with tiling @req_tiling and fill
 * in @buf to describe it.  With @ccs set, extra space for the AUX
 * (color control) surface is allocated after the main surface and
 * buf->aux is set up to point at it.
 */
static void scratch_buf_init(data_t *data, struct igt_buf *buf,
			     int width, int height,
			     uint32_t req_tiling, bool ccs)
{
	uint32_t tiling = req_tiling;
	unsigned long pitch;
	int bpp = 32;

	memset(buf, 0, sizeof(*buf));

	if (ccs) {
		int aux_width, aux_height;
		int size;

		/* Render compression needs gen9+ and a Y-type tiling. */
		igt_require(intel_gen(data->devid) >= 9);
		igt_assert(tiling == I915_TILING_Y ||
			   tiling == I915_TILING_Yf);

		/* 128B stride alignment — presumably matches the CCS
		 * mapping granularity; TODO confirm against bspec. */
		buf->stride = ALIGN(width * (bpp / 8), 128);
		buf->size = buf->stride * height;
		buf->tiling = tiling;
		buf->bpp = bpp;

		aux_width = scratch_buf_aux_width(buf);
		aux_height = scratch_buf_aux_height(buf);

		/* AUX surface lives right after the main surface, which
		 * is padded to a whole number of 32-row tile rows. */
		buf->aux.offset = buf->stride * ALIGN(height, 32);
		buf->aux.stride = aux_width;

		size = buf->aux.offset + aux_width * aux_height;

		buf->bo = drm_intel_bo_alloc(data->bufmgr, "", size, 4096);

		/* Yf cannot be expressed via the set_tiling ioctl, so
		 * only Y gets a fence tiling mode on the bo. */
		if (tiling == I915_TILING_Y) {
			drm_intel_bo_set_tiling(buf->bo, &tiling, buf->stride);
			igt_assert_eq(tiling, req_tiling);
		}
	} else if (req_tiling == I915_TILING_Yf) {
		int size;

		buf->stride = ALIGN(width * (bpp / 8), 128);
		buf->size = buf->stride * height;
		buf->tiling = tiling;
		buf->bpp = bpp;

		/* Pad to a whole number of 32-row Yf tile rows. */
		size = buf->stride * ALIGN(height, 32);

		buf->bo = drm_intel_bo_alloc(data->bufmgr, "", size, 4096);
	} else {
		/* Linear/X/Y: let libdrm pick a valid pitch and apply
		 * the tiling mode itself. */
		buf->bo = drm_intel_bo_alloc_tiled(data->bufmgr, "",
						   width, height, bpp / 8,
						   &tiling, &pitch, 0);
		igt_assert_eq(tiling, req_tiling);

		buf->stride = pitch;
		buf->tiling = tiling;
		buf->size = pitch * height;
		buf->bpp = bpp;
	}

	/* Sanity check that the layout round-trips to the same size. */
	igt_assert(igt_buf_width(buf) == width);
	igt_assert(igt_buf_height(buf) == height);
}
461 
/* Drop our reference to the bo backing @buf; counterpart of
 * scratch_buf_init(). */
static void scratch_buf_fini(struct igt_buf *buf)
{
	drm_intel_bo_unreference(buf->bo);
}
466 
467 static void
scratch_buf_check(data_t * data,struct igt_buf * buf,struct igt_buf * ref,int x,int y)468 scratch_buf_check(data_t *data,
469 		  struct igt_buf *buf,
470 		  struct igt_buf *ref,
471 		  int x, int y)
472 {
473 	int width = igt_buf_width(buf);
474 	uint32_t buf_val, ref_val;
475 	uint32_t *linear;
476 
477 	igt_assert_eq(igt_buf_width(buf), igt_buf_width(ref));
478 	igt_assert_eq(igt_buf_height(buf), igt_buf_height(ref));
479 	igt_assert_eq(buf->bo->size, ref->bo->size);
480 
481 	linear = linear_copy(data, buf);
482 	buf_val = linear[y * width + x];
483 	free(linear);
484 
485 	linear = linear_copy(data, ref);
486 	ref_val = linear[y * width + x];
487 	free(linear);
488 
489 	igt_assert_f(buf_val == ref_val,
490 		     "Expected 0x%08x, found 0x%08x at (%d,%d)\n",
491 		     ref_val, buf_val, x, y);
492 }
493 
494 static void
scratch_buf_check_all(data_t * data,struct igt_buf * buf,struct igt_buf * ref)495 scratch_buf_check_all(data_t *data,
496 		      struct igt_buf *buf,
497 		      struct igt_buf *ref)
498 {
499 	int width = igt_buf_width(buf);
500 	int height  = igt_buf_height(buf);
501 	uint32_t *linear_buf, *linear_ref;
502 
503 	igt_assert_eq(igt_buf_width(buf), igt_buf_width(ref));
504 	igt_assert_eq(igt_buf_height(buf), igt_buf_height(ref));
505 	igt_assert_eq(buf->bo->size, ref->bo->size);
506 
507 	linear_buf = linear_copy(data, buf);
508 	linear_ref = linear_copy(data, ref);
509 
510 	for (int y = 0; y < height; y++) {
511 		for (int x = 0; x < width; x++) {
512 			uint32_t buf_val = linear_buf[y * width + x];
513 			uint32_t ref_val = linear_ref[y * width + x];
514 
515 			igt_assert_f(buf_val == ref_val,
516 				     "Expected 0x%08x, found 0x%08x at (%d,%d)\n",
517 				     ref_val, buf_val, x, y);
518 		}
519 	}
520 
521 	free(linear_ref);
522 	free(linear_buf);
523 }
524 
/*
 * Verify that compression actually happened: at least one byte of the
 * AUX (CCS) surface must be non-zero.
 */
static void scratch_buf_aux_check(data_t *data,
				  struct igt_buf *buf)
{
	int aux_size = scratch_buf_aux_width(buf) *
		scratch_buf_aux_height(buf);
	uint8_t *aux = linear_copy_aux(data, buf);
	bool compressed = false;

	for (int i = 0; i < aux_size; i++) {
		if (aux[i]) {
			compressed = true;
			break;
		}
	}

	free(aux);

	igt_assert_f(compressed,
		     "Aux surface indicates that nothing was compressed\n");
}
545 
/*
 * Run one render-copy scenario: draw distinct gradients into one source
 * per tiling format and into the destination, build the expected result
 * on the CPU with scratch_buf_copy(), then perform the same copies with
 * data->render_copy() — optionally bouncing through a CCS-compressed
 * surface — and compare the GPU result against the reference.
 */
static void test(data_t *data, uint32_t tiling, uint64_t ccs_modifier)
{
	struct igt_buf dst, ccs, ref;
	/* One source per tiling format; each is copied to a different
	 * quadrant of the destination. */
	struct {
		struct igt_buf buf;
		const char *filename;
		uint32_t tiling;
		int x, y;
	} src[] = {
		{
			.filename = "source-linear.png",
			.tiling = I915_TILING_NONE,
			.x = 1, .y = HEIGHT/2+1,
		},
		{
			.filename = "source-x-tiled.png",
			.tiling = I915_TILING_X,
			.x = WIDTH/2+1, .y = HEIGHT/2+1,
		},
		{
			.filename = "source-y-tiled.png",
			.tiling = I915_TILING_Y,
			.x = WIDTH/2+1, .y = 1,
		},
		{
			.filename = "source-yf-tiled.png",
			.tiling = I915_TILING_Yf,
			.x = 1, .y = 1,
		},
	};

	int opt_dump_aub = igt_aub_dump_enabled();
	int num_src = ARRAY_SIZE(src);

	/* no Yf before gen9 — drop the last (Yf) source entry */
	if (intel_gen(data->devid) < 9)
		num_src--;

	if (tiling == I915_TILING_Yf || ccs_modifier)
		igt_require(intel_gen(data->devid) >= 9);

	for (int i = 0; i < num_src; i++)
		scratch_buf_init(data, &src[i].buf, WIDTH, HEIGHT, src[i].tiling, false);
	scratch_buf_init(data, &dst, WIDTH, HEIGHT, tiling, false);
	if (ccs_modifier)
		scratch_buf_init(data, &ccs, WIDTH, HEIGHT, ccs_modifier, true);
	scratch_buf_init(data, &ref, WIDTH, HEIGHT, I915_TILING_NONE, false);

	/* Sources get the alternate palette, dst the default one, so a
	 * mixed-up copy is visible in the pixel checks. */
	for (int i = 0; i < num_src; i++)
		scratch_buf_draw_pattern(data, &src[i].buf,
					 0, 0, WIDTH, HEIGHT,
					 0, 0, WIDTH, HEIGHT, true);
	scratch_buf_draw_pattern(data, &dst,
				 0, 0, WIDTH, HEIGHT,
				 0, 0, WIDTH, HEIGHT, false);

	/* Build the expected end result on the CPU. */
	scratch_buf_copy(data,
			 &dst, 0, 0, WIDTH, HEIGHT,
			 &ref, 0, 0);
	for (int i = 0; i < num_src; i++)
		scratch_buf_copy(data,
				 &src[i].buf, WIDTH/4, HEIGHT/4, WIDTH/2-2, HEIGHT/2-2,
				 &ref, src[i].x, src[i].y);

	if (opt_dump_png) {
		for (int i = 0; i < num_src; i++)
			scratch_buf_write_to_png(data, &src[i].buf, src[i].filename);
		scratch_buf_write_to_png(data, &dst, "destination.png");
		scratch_buf_write_to_png(data, &ref, "reference.png");
	}

	if (opt_dump_aub) {
		drm_intel_bufmgr_gem_set_aub_filename(data->bufmgr,
						      "rendercopy.aub");
		drm_intel_bufmgr_gem_set_aub_dump(data->bufmgr, true);
	}

	/* This will copy the src to the mid point of the dst buffer. Presumably
	 * the out of bounds accesses will get clipped.
	 * Resulting buffer should look like:
	 *	  _______
	 *	 |dst|dst|
	 *	 |dst|src|
	 *	  -------
	 */
	/* CCS path: first copy dst into the compressed surface... */
	if (ccs_modifier)
		data->render_copy(data->batch, NULL,
				  &dst, 0, 0, WIDTH, HEIGHT,
				  &ccs, 0, 0);

	/* ...copy each source quadrant (into ccs when compressing)... */
	for (int i = 0; i < num_src; i++)
		data->render_copy(data->batch, NULL,
				  &src[i].buf, WIDTH/4, HEIGHT/4, WIDTH/2-2, HEIGHT/2-2,
				  ccs_modifier ? &ccs : &dst, src[i].x, src[i].y);

	/* ...then decompress back into dst before checking. */
	if (ccs_modifier)
		data->render_copy(data->batch, NULL,
				  &ccs, 0, 0, WIDTH, HEIGHT,
				  &dst, 0, 0);

	if (opt_dump_png){
		scratch_buf_write_to_png(data, &dst, "result.png");
		if (ccs_modifier) {
			scratch_buf_write_to_png(data, &ccs, "compressed.png");
			scratch_buf_aux_write_to_png(data, &ccs, "compressed-aux.png");
		}
	}

	if (opt_dump_aub) {
		drm_intel_gem_bo_aub_dump_bmp(dst.bo,
					      0, 0, igt_buf_width(&dst),
					      igt_buf_height(&dst),
					      AUB_DUMP_BMP_FORMAT_ARGB_8888,
					      dst.stride, 0);
		drm_intel_bufmgr_gem_set_aub_dump(data->bufmgr, false);
	} else if (check_all_pixels) {
		scratch_buf_check_all(data, &dst, &ref);
	} else {
		/* Spot-check one pixel in a dst-only area and one in a
		 * copied-source area. */
		scratch_buf_check(data, &dst, &ref, 10, 10);
		scratch_buf_check(data, &dst, &ref, WIDTH - 10, HEIGHT - 10);
	}

	if (ccs_modifier)
		scratch_buf_aux_check(data, &ccs);

	scratch_buf_fini(&ref);
	if (ccs_modifier)
		scratch_buf_fini(&ccs);
	scratch_buf_fini(&dst);
	for (int i = 0; i < num_src; i++)
		scratch_buf_fini(&src[i].buf);
}
678 
opt_handler(int opt,int opt_index,void * data)679 static int opt_handler(int opt, int opt_index, void *data)
680 {
681 	switch (opt) {
682 	case 'd':
683 		opt_dump_png = true;
684 		break;
685 	case 'a':
686 		check_all_pixels = true;
687 		break;
688 	default:
689 		return IGT_OPT_HANDLER_ERROR;
690 	}
691 
692 	return IGT_OPT_HANDLER_SUCCESS;
693 }
694 
/* Help text for the extra options parsed by opt_handler(). */
const char *help_str =
	"  -d\tDump PNG\n"
	"  -a\tCheck all pixels\n"
	;
699 
/*
 * Entry point: one fixture setting up the device, bufmgr, render-copy
 * function and batchbuffer, then one subtest per destination tiling,
 * plus CCS variants that bounce the copy through a compressed surface.
 */
igt_main_args("da", NULL, help_str, opt_handler, NULL)
{
	data_t data = {0, };

	igt_fixture {
		data.drm_fd = drm_open_driver_render(DRIVER_INTEL);
		data.devid = intel_get_drm_devid(data.drm_fd);
		igt_require_gem(data.drm_fd);

		data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
		igt_assert(data.bufmgr);

		/* Not every platform has a render-copy implementation. */
		data.render_copy = igt_get_render_copyfunc(data.devid);
		igt_require_f(data.render_copy,
			      "no render-copy function\n");

		data.batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
		igt_assert(data.batch);
	}

	/* Plain copies to each destination tiling. */
	igt_subtest("linear")
		test(&data, I915_TILING_NONE, 0);
	igt_subtest("x-tiled")
		test(&data, I915_TILING_X, 0);
	igt_subtest("y-tiled")
		test(&data, I915_TILING_Y, 0);
	igt_subtest("yf-tiled")
		test(&data, I915_TILING_Yf, 0);

	/* Copies routed through a Y-tiled CCS-compressed surface. */
	igt_subtest("y-tiled-ccs-to-linear")
		test(&data, I915_TILING_NONE, I915_TILING_Y);
	igt_subtest("y-tiled-ccs-to-x-tiled")
		test(&data, I915_TILING_X, I915_TILING_Y);
	igt_subtest("y-tiled-ccs-to-y-tiled")
		test(&data, I915_TILING_Y, I915_TILING_Y);
	igt_subtest("y-tiled-ccs-to-yf-tiled")
		test(&data, I915_TILING_Yf, I915_TILING_Y);

	/* Same, with a Yf-tiled compressed surface. */
	igt_subtest("yf-tiled-ccs-to-linear")
		test(&data, I915_TILING_NONE, I915_TILING_Yf);
	igt_subtest("yf-tiled-ccs-to-x-tiled")
		test(&data, I915_TILING_X, I915_TILING_Yf);
	igt_subtest("yf-tiled-ccs-to-y-tiled")
		test(&data, I915_TILING_Y, I915_TILING_Yf);
	igt_subtest("yf-tiled-ccs-to-yf-tiled")
		test(&data, I915_TILING_Yf, I915_TILING_Yf);

	igt_fixture {
		intel_batchbuffer_free(data.batch);
		drm_intel_bufmgr_destroy(data.bufmgr);
	}
}
752