• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 #include <assert.h>
2 #include <stdlib.h>
3 #include <sys/ioctl.h>
4 #include <stdio.h>
5 #include <string.h>
6 #include <assert.h>
7 #include <fcntl.h>
8 #include <inttypes.h>
9 #include <errno.h>
10 #include <sys/stat.h>
11 #include <sys/time.h>
12 #include "drm.h"
13 #include "i915_drm.h"
14 #include "drmtest.h"
15 #include "intel_bufmgr.h"
16 #include "intel_batchbuffer.h"
17 #include "intel_io.h"
18 #include "intel_chipset.h"
19 #include "rendercopy.h"
20 #include "gen7_render.h"
21 #include "intel_reg.h"
22 
23 
/* Pre-compiled gen7 pixel-shader kernel, one EU instruction per row
 * (4 dwords each).  Opaque machine code: presumably it samples the
 * source surface at the interpolated coordinate and writes the result
 * to the render target — confirm against the original shader source
 * before modifying. */
static const uint32_t ps_kernel[][4] = {
	{ 0x0080005a, 0x2e2077bd, 0x000000c0, 0x008d0040 },
	{ 0x0080005a, 0x2e6077bd, 0x000000d0, 0x008d0040 },
	{ 0x02800031, 0x21801fa9, 0x008d0e20, 0x08840001 },
	{ 0x00800001, 0x2e2003bd, 0x008d0180, 0x00000000 },
	{ 0x00800001, 0x2e6003bd, 0x008d01c0, 0x00000000 },
	{ 0x00800001, 0x2ea003bd, 0x008d0200, 0x00000000 },
	{ 0x00800001, 0x2ee003bd, 0x008d0240, 0x00000000 },
	{ 0x05800031, 0x20001fa8, 0x008d0e20, 0x90031000 },
};
34 
35 static void
gen7_render_flush(struct intel_batchbuffer * batch,drm_intel_context * context,uint32_t batch_end)36 gen7_render_flush(struct intel_batchbuffer *batch,
37 		  drm_intel_context *context, uint32_t batch_end)
38 {
39 	int ret;
40 
41 	ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
42 	if (ret == 0)
43 		ret = drm_intel_gem_bo_context_exec(batch->bo, context,
44 						    batch_end, 0);
45 	igt_assert(ret == 0);
46 }
47 
48 static uint32_t
gen7_tiling_bits(uint32_t tiling)49 gen7_tiling_bits(uint32_t tiling)
50 {
51 	switch (tiling) {
52 	default: igt_assert(0);
53 	case I915_TILING_NONE: return 0;
54 	case I915_TILING_X: return GEN7_SURFACE_TILED;
55 	case I915_TILING_Y: return GEN7_SURFACE_TILED | GEN7_SURFACE_TILED_Y;
56 	}
57 }
58 
/* Build a gen7 SURFACE_STATE for @buf in the batch's state area and
 * return its offset within the batch bo.  @is_dst selects render-target
 * (written) vs. sampler (read-only) GEM domains for the relocation. */
static uint32_t
gen7_bind_buf(struct intel_batchbuffer *batch,
	      const struct igt_buf *buf,
	      int is_dst)
{
	uint32_t format, *ss;
	uint32_t write_domain, read_domain;
	int ret;

	/* Hardware limits for gen7 2D surfaces. */
	igt_assert_lte(buf->stride, 256*1024);
	igt_assert_lte(igt_buf_width(buf), 16384);
	igt_assert_lte(igt_buf_height(buf), 16384);

	/* Pick a surface format whose texel size matches buf->bpp. */
	switch (buf->bpp) {
		case 8: format = SURFACEFORMAT_R8_UNORM; break;
		case 16: format = SURFACEFORMAT_R8G8_UNORM; break;
		case 32: format = SURFACEFORMAT_B8G8R8A8_UNORM; break;
		case 64: format = SURFACEFORMAT_R16G16B16A16_FLOAT; break;
		default: igt_assert(0);
	}

	/* Destination is written by the render engine; the source is only
	 * read through the sampler. */
	if (is_dst) {
		write_domain = read_domain = I915_GEM_DOMAIN_RENDER;
	} else {
		write_domain = 0;
		read_domain = I915_GEM_DOMAIN_SAMPLER;
	}

	/* 8-dword SURFACE_STATE, 32-byte aligned. */
	ss = intel_batchbuffer_subdata_alloc(batch, 8 * sizeof(*ss), 32);

	ss[0] = (SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
		 gen7_tiling_bits(buf->tiling) |
		format << GEN7_SURFACE_FORMAT_SHIFT);
	ss[1] = buf->bo->offset; /* placeholder, patched by the reloc below */
	ss[2] = ((igt_buf_width(buf) - 1)  << GEN7_SURFACE_WIDTH_SHIFT |
		 (igt_buf_height(buf) - 1) << GEN7_SURFACE_HEIGHT_SHIFT);
	ss[3] = (buf->stride - 1) << GEN7_SURFACE_PITCH_SHIFT;
	ss[4] = 0;
	/* Memory-object control state (cacheability) is platform specific. */
	if (IS_VALLEYVIEW(batch->devid))
		ss[5] = VLV_MOCS_L3 << 16;
	else
		ss[5] = (IVB_MOCS_L3 | IVB_MOCS_PTE) << 16;
	ss[6] = 0;
	ss[7] = 0;
	if (IS_HASWELL(batch->devid))
		ss[7] |= HSW_SURFACE_SWIZZLE(RED, GREEN, BLUE, ALPHA);

	/* Have the kernel rewrite ss[1] with the bo's final GPU address. */
	ret = drm_intel_bo_emit_reloc(batch->bo,
				      intel_batchbuffer_subdata_offset(batch, &ss[1]),
				      buf->bo, 0,
				      read_domain, write_domain);
	igt_assert(ret == 0);

	return intel_batchbuffer_subdata_offset(batch, ss);
}
114 
/* Emit 3DSTATE_VERTEX_ELEMENTS describing the vertex layout used by
 * the copy: element 0 is a constant (all STORE_0) padding element,
 * followed by the x,y position and s,t texcoord as 16-bit pairs read
 * from the single vertex buffer.  Dword order is fixed by hardware. */
static void
gen7_emit_vertex_elements(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN4_3DSTATE_VERTEX_ELEMENTS |
		  ((2 * (1 + 2)) + 1 - 2));

	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
		  SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT);

	/* Padding element: stores constant zeroes, reads nothing. */
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);

	/* x,y */
	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
		  SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);

	/* s,t */
	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
		  SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  4 << VE0_OFFSET_SHIFT);  /* offset vb in bytes */
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
}
148 
149 static uint32_t
gen7_create_vertex_buffer(struct intel_batchbuffer * batch,uint32_t src_x,uint32_t src_y,uint32_t dst_x,uint32_t dst_y,uint32_t width,uint32_t height)150 gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
151 			  uint32_t src_x, uint32_t src_y,
152 			  uint32_t dst_x, uint32_t dst_y,
153 			  uint32_t width, uint32_t height)
154 {
155 	uint16_t *v;
156 
157 	v = intel_batchbuffer_subdata_alloc(batch, 12 * sizeof(*v), 8);
158 
159 	v[0] = dst_x + width;
160 	v[1] = dst_y + height;
161 	v[2] = src_x + width;
162 	v[3] = src_y + height;
163 
164 	v[4] = dst_x;
165 	v[5] = dst_y + height;
166 	v[6] = src_x;
167 	v[7] = src_y + height;
168 
169 	v[8] = dst_x;
170 	v[9] = dst_y;
171 	v[10] = src_x;
172 	v[11] = src_y;
173 
174 	return intel_batchbuffer_subdata_offset(batch, v);
175 }
176 
/* Emit 3DSTATE_VERTEX_BUFFERS pointing at the vertex data previously
 * written at @offset in the batch bo.  Pitch is 8 bytes: four 16-bit
 * components (x, y, s, t) per vertex.
 * NOTE(review): the geometry parameters (src_x..height) are unused
 * here — the data was already baked in by gen7_create_vertex_buffer().
 */
static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch,
				    int src_x, int src_y,
				    int dst_x, int dst_y,
				    int width, int height,
				    uint32_t offset)
{
	OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | (5 - 2));
	OUT_BATCH(0 << GEN6_VB0_BUFFER_INDEX_SHIFT |
		  GEN6_VB0_VERTEXDATA |
		  GEN7_VB0_ADDRESS_MODIFY_ENABLE |
		  4 * 2 << VB0_BUFFER_PITCH_SHIFT);

	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_VERTEX, 0, offset);
	OUT_BATCH(~0); /* end address: no limit */
	OUT_BATCH(0);
}
193 
194 static uint32_t
gen7_bind_surfaces(struct intel_batchbuffer * batch,const struct igt_buf * src,const struct igt_buf * dst)195 gen7_bind_surfaces(struct intel_batchbuffer *batch,
196 		   const struct igt_buf *src,
197 		   const struct igt_buf *dst)
198 {
199 	uint32_t *binding_table;
200 
201 	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
202 
203 	binding_table[0] = gen7_bind_buf(batch, dst, 1);
204 	binding_table[1] = gen7_bind_buf(batch, src, 0);
205 
206 	return intel_batchbuffer_subdata_offset(batch, binding_table);
207 }
208 
/* Point the pixel shader at the binding table built by
 * gen7_bind_surfaces().  @src and @dst are unused here; the table was
 * already built from them by the caller. */
static void
gen7_emit_binding_table(struct intel_batchbuffer *batch,
			const struct igt_buf *src,
			const struct igt_buf *dst,
			uint32_t bind_surf_off)
{
	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
	OUT_BATCH(bind_surf_off);
}
218 
219 static void
gen7_emit_drawing_rectangle(struct intel_batchbuffer * batch,const struct igt_buf * dst)220 gen7_emit_drawing_rectangle(struct intel_batchbuffer *batch, const struct igt_buf *dst)
221 {
222 	OUT_BATCH(GEN4_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
223 	OUT_BATCH(0);
224 	OUT_BATCH((igt_buf_height(dst) - 1) << 16 | (igt_buf_width(dst) - 1));
225 	OUT_BATCH(0);
226 }
227 
228 static uint32_t
gen7_create_blend_state(struct intel_batchbuffer * batch)229 gen7_create_blend_state(struct intel_batchbuffer *batch)
230 {
231 	struct gen6_blend_state *blend;
232 
233 	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
234 
235 	blend->blend0.dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
236 	blend->blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE;
237 	blend->blend0.blend_func = GEN6_BLENDFUNCTION_ADD;
238 	blend->blend1.post_blend_clamp_enable = 1;
239 	blend->blend1.pre_blend_clamp_enable = 1;
240 
241 	return intel_batchbuffer_subdata_offset(batch, blend);
242 }
243 
/* Emit STATE_BASE_ADDRESS: all indirect state (surface state, dynamic
 * state, instructions) lives inside the batch bo itself, so those
 * bases are relocated to it; the remaining dwords set unbounded upper
 * limits.  NOTE(review): dword order follows the gen7 layout — confirm
 * against the PRM before rearranging. */
static void
gen7_emit_state_base_address(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN4_STATE_BASE_ADDRESS | (10 - 2));
	OUT_BATCH(0);
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
	OUT_BATCH(0);
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);

	/* Upper-bound dwords. */
	OUT_BATCH(0);
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
	OUT_BATCH(0);
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
}
259 
260 static uint32_t
gen7_create_cc_viewport(struct intel_batchbuffer * batch)261 gen7_create_cc_viewport(struct intel_batchbuffer *batch)
262 {
263 	struct gen4_cc_viewport *vp;
264 
265 	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
266 	vp->min_depth = -1.e35;
267 	vp->max_depth = 1.e35;
268 
269 	return intel_batchbuffer_subdata_offset(batch, vp);
270 }
271 
/* Point the pipeline at the previously built blend state and CC
 * viewport (both offsets within the batch bo). */
static void
gen7_emit_cc(struct intel_batchbuffer *batch, uint32_t blend_state,
	     uint32_t cc_viewport)
{
	OUT_BATCH(GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
	OUT_BATCH(blend_state);

	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
	OUT_BATCH(cc_viewport);
}
282 
283 static uint32_t
gen7_create_sampler(struct intel_batchbuffer * batch)284 gen7_create_sampler(struct intel_batchbuffer *batch)
285 {
286 	struct gen7_sampler_state *ss;
287 
288 	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
289 
290 	ss->ss0.min_filter = GEN4_MAPFILTER_NEAREST;
291 	ss->ss0.mag_filter = GEN4_MAPFILTER_NEAREST;
292 
293 	ss->ss3.r_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
294 	ss->ss3.s_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
295 	ss->ss3.t_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
296 
297 	ss->ss3.non_normalized_coord = 1;
298 
299 	return intel_batchbuffer_subdata_offset(batch, ss);
300 }
301 
/* Point the pixel shader at the sampler state built by
 * gen7_create_sampler() (offset within the batch bo). */
static void
gen7_emit_sampler(struct intel_batchbuffer *batch, uint32_t sampler_off)
{
	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
	OUT_BATCH(sampler_off);
}
308 
/* Configure single-sampled rendering and enable only sample 0 in the
 * sample mask. */
static void
gen7_emit_multisample(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
	OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
		  GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
	OUT_BATCH(1);
}
321 
/* Partition the URB: 8KB of push constants for the PS, a real
 * allocation for the VS, and zero-size allocations for the unused
 * HS/DS/GS stages. */
static void
gen7_emit_urb(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
	OUT_BATCH(8); /* in 1KBs */

	/* num of VS entries must be divisible by 8 if size < 9 */
	OUT_BATCH(GEN7_3DSTATE_URB_VS | (2 - 2));
	OUT_BATCH((64 << GEN7_URB_ENTRY_NUMBER_SHIFT) |
		  (2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
		  (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));

	/* Unused stages get zero-size allocations. */
	OUT_BATCH(GEN7_3DSTATE_URB_HS | (2 - 2));
	OUT_BATCH((0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
		  (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));

	OUT_BATCH(GEN7_3DSTATE_URB_DS | (2 - 2));
	OUT_BATCH((0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
		  (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));

	OUT_BATCH(GEN7_3DSTATE_URB_GS | (2 - 2));
	OUT_BATCH((0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
		  (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
}
346 
347 static void
gen7_emit_vs(struct intel_batchbuffer * batch)348 gen7_emit_vs(struct intel_batchbuffer *batch)
349 {
350 	OUT_BATCH(GEN6_3DSTATE_VS | (6 - 2));
351 	OUT_BATCH(0); /* no VS kernel */
352 	OUT_BATCH(0);
353 	OUT_BATCH(0);
354 	OUT_BATCH(0);
355 	OUT_BATCH(0); /* pass-through */
356 }
357 
358 static void
gen7_emit_hs(struct intel_batchbuffer * batch)359 gen7_emit_hs(struct intel_batchbuffer *batch)
360 {
361 	OUT_BATCH(GEN7_3DSTATE_HS | (7 - 2));
362 	OUT_BATCH(0); /* no HS kernel */
363 	OUT_BATCH(0);
364 	OUT_BATCH(0);
365 	OUT_BATCH(0);
366 	OUT_BATCH(0);
367 	OUT_BATCH(0); /* pass-through */
368 }
369 
370 static void
gen7_emit_te(struct intel_batchbuffer * batch)371 gen7_emit_te(struct intel_batchbuffer *batch)
372 {
373 	OUT_BATCH(GEN7_3DSTATE_TE | (4 - 2));
374 	OUT_BATCH(0);
375 	OUT_BATCH(0);
376 	OUT_BATCH(0);
377 }
378 
379 static void
gen7_emit_ds(struct intel_batchbuffer * batch)380 gen7_emit_ds(struct intel_batchbuffer *batch)
381 {
382 	OUT_BATCH(GEN7_3DSTATE_DS | (6 - 2));
383 	OUT_BATCH(0);
384 	OUT_BATCH(0);
385 	OUT_BATCH(0);
386 	OUT_BATCH(0);
387 	OUT_BATCH(0);
388 }
389 
390 static void
gen7_emit_gs(struct intel_batchbuffer * batch)391 gen7_emit_gs(struct intel_batchbuffer *batch)
392 {
393 	OUT_BATCH(GEN6_3DSTATE_GS | (7 - 2));
394 	OUT_BATCH(0); /* no GS kernel */
395 	OUT_BATCH(0);
396 	OUT_BATCH(0);
397 	OUT_BATCH(0);
398 	OUT_BATCH(0);
399 	OUT_BATCH(0); /* pass-through  */
400 }
401 
402 static void
gen7_emit_streamout(struct intel_batchbuffer * batch)403 gen7_emit_streamout(struct intel_batchbuffer *batch)
404 {
405 	OUT_BATCH(GEN7_3DSTATE_STREAMOUT | (3 - 2));
406 	OUT_BATCH(0);
407 	OUT_BATCH(0);
408 }
409 
/* Setup/rasterizer state: no culling, so the RECTLIST is always drawn
 * regardless of winding. */
static void
gen7_emit_sf(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN6_3DSTATE_SF | (7 - 2));
	OUT_BATCH(0);
	OUT_BATCH(GEN6_3DSTATE_SF_CULL_NONE);
	OUT_BATCH(2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
}
421 
422 static void
gen7_emit_sbe(struct intel_batchbuffer * batch)423 gen7_emit_sbe(struct intel_batchbuffer *batch)
424 {
425 	OUT_BATCH(GEN7_3DSTATE_SBE | (14 - 2));
426 	OUT_BATCH(1 << GEN7_SBE_NUM_OUTPUTS_SHIFT |
427 		  1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT |
428 		  1 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT);
429 	OUT_BATCH(0);
430 	OUT_BATCH(0); /* dw4 */
431 	OUT_BATCH(0);
432 	OUT_BATCH(0);
433 	OUT_BATCH(0);
434 	OUT_BATCH(0); /* dw8 */
435 	OUT_BATCH(0);
436 	OUT_BATCH(0);
437 	OUT_BATCH(0);
438 	OUT_BATCH(0); /* dw12 */
439 	OUT_BATCH(0);
440 	OUT_BATCH(0);
441 }
442 
/* Emit 3DSTATE_PS binding the copy kernel at @kernel_off (offset in
 * the batch bo), with one sampler, a two-entry binding table and
 * SIMD16 dispatch enabled. */
static void
gen7_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel_off)
{
	int threads;

	/* Haswell encodes the max-thread count differently and adds a
	 * sample-mask field. */
	if (IS_HASWELL(batch->devid))
		threads = 40 << HSW_PS_MAX_THREADS_SHIFT | 1 << HSW_PS_SAMPLE_MASK_SHIFT;
	else
		threads = 40 << IVB_PS_MAX_THREADS_SHIFT;

	OUT_BATCH(GEN7_3DSTATE_PS | (8 - 2));
	OUT_BATCH(kernel_off);
	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
		  2 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
	OUT_BATCH(0); /* scratch address */
	OUT_BATCH(threads |
		  GEN7_PS_16_DISPATCH_ENABLE |
		  GEN7_PS_ATTRIBUTE_ENABLE);
	OUT_BATCH(6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
	OUT_BATCH(0);
	OUT_BATCH(0);
}
465 
/* Disable the clipper (pass-through) and clear the SF/CLIP viewport
 * state pointer. */
static void
gen7_emit_clip(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0); /* pass-through */
	OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
	OUT_BATCH(0);
}
477 
/* Windower: enable PS dispatch with perspective pixel barycentric
 * interpolation. */
static void
gen7_emit_wm(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN6_3DSTATE_WM | (3 - 2));
	OUT_BATCH(GEN7_WM_DISPATCH_ENABLE |
		GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
	OUT_BATCH(0);
}
486 
487 static void
gen7_emit_null_depth_buffer(struct intel_batchbuffer * batch)488 gen7_emit_null_depth_buffer(struct intel_batchbuffer *batch)
489 {
490 	OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
491 	OUT_BATCH(SURFACE_NULL << GEN4_3DSTATE_DEPTH_BUFFER_TYPE_SHIFT |
492 		  GEN4_DEPTHFORMAT_D32_FLOAT << GEN4_3DSTATE_DEPTH_BUFFER_FORMAT_SHIFT);
493 	OUT_BATCH(0); /* disable depth, stencil and hiz */
494 	OUT_BATCH(0);
495 	OUT_BATCH(0);
496 	OUT_BATCH(0);
497 	OUT_BATCH(0);
498 
499 	OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
500 	OUT_BATCH(0);
501 	OUT_BATCH(0);
502 }
503 
#define BATCH_STATE_SPLIT 2048
/*
 * gen7_render_copyfunc - copy a rectangle via the gen7 3D pipeline.
 *
 * Copies a width x height block from (src_x, src_y) in @src to
 * (dst_x, dst_y) in @dst by drawing a single textured RECTLIST.
 * The batch buffer is split in two: indirect state is assembled from
 * BATCH_STATE_SPLIT upward, the command stream from the start of the
 * buffer; the batch is then submitted on @context and reset.
 */
void gen7_render_copyfunc(struct intel_batchbuffer *batch,
			  drm_intel_context *context,
			  const struct igt_buf *src, unsigned src_x, unsigned src_y,
			  unsigned width, unsigned height,
			  const struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
{
	uint32_t ps_binding_table, ps_sampler_off, ps_kernel_off;
	uint32_t blend_state, cc_viewport;
	uint32_t vertex_buffer;
	uint32_t batch_end;

	igt_assert(src->bpp == dst->bpp);
	intel_batchbuffer_flush_with_context(batch, context);

	/* Build all indirect state in the upper half of the batch. */
	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];


	blend_state = gen7_create_blend_state(batch);
	cc_viewport = gen7_create_cc_viewport(batch);
	ps_sampler_off = gen7_create_sampler(batch);
	ps_kernel_off = intel_batchbuffer_copy_data(batch, ps_kernel,
						    sizeof(ps_kernel), 64);
	vertex_buffer = gen7_create_vertex_buffer(batch,
						  src_x, src_y,
						  dst_x, dst_y,
						  width, height);
	ps_binding_table = gen7_bind_surfaces(batch, src, dst);

	/* State must not run past the end of the 4KB batch. */
	igt_assert(batch->ptr < &batch->buffer[4095]);

	/* Now emit the command stream from the start of the buffer. */
	batch->ptr = batch->buffer;
	OUT_BATCH(G4X_PIPELINE_SELECT | PIPELINE_SELECT_3D);

	gen7_emit_state_base_address(batch);
	gen7_emit_multisample(batch);
	gen7_emit_urb(batch);
	gen7_emit_vs(batch);
	gen7_emit_hs(batch);
	gen7_emit_te(batch);
	gen7_emit_ds(batch);
	gen7_emit_gs(batch);
	gen7_emit_clip(batch);
	gen7_emit_sf(batch);
	gen7_emit_wm(batch);
	gen7_emit_streamout(batch);
	gen7_emit_null_depth_buffer(batch);
	gen7_emit_cc(batch, blend_state, cc_viewport);
	gen7_emit_sampler(batch, ps_sampler_off);
	gen7_emit_sbe(batch);
	gen7_emit_ps(batch, ps_kernel_off);
	gen7_emit_vertex_elements(batch);
	gen7_emit_vertex_buffer(batch, src_x, src_y,
				dst_x, dst_y, width,
				height, vertex_buffer);
	gen7_emit_binding_table(batch, src, dst, ps_binding_table);
	gen7_emit_drawing_rectangle(batch, dst);

	/* Kick off one RECTLIST using the 3 vertices built above. */
	OUT_BATCH(GEN4_3DPRIMITIVE | (7 - 2));
	OUT_BATCH(GEN4_3DPRIMITIVE_VERTEX_SEQUENTIAL | _3DPRIM_RECTLIST);
	OUT_BATCH(3);
	OUT_BATCH(0);
	OUT_BATCH(1);   /* single instance */
	OUT_BATCH(0);   /* start instance location */
	OUT_BATCH(0);   /* index buffer offset, ignored */

	OUT_BATCH(MI_BATCH_BUFFER_END);

	/* Commands must stay below the state area. */
	batch_end = batch->ptr - batch->buffer;
	batch_end = ALIGN(batch_end, 8);
	igt_assert(batch_end < BATCH_STATE_SPLIT);

	gen7_render_flush(batch, context, batch_end);
	intel_batchbuffer_reset(batch);
}
579