1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef BLORP_GENX_EXEC_H
25 #define BLORP_GENX_EXEC_H
26
27 #include "blorp_priv.h"
28 #include "common/gen_device_info.h"
29 #include "common/gen_sample_positions.h"
30 #include "genxml/gen_macros.h"
31
32 /**
33 * This file provides the blorp pipeline setup and execution functionality.
34 * It defines the following function:
35 *
36 * static void
37 * blorp_exec(struct blorp_context *blorp, void *batch_data,
38 * const struct blorp_params *params);
39 *
40 * It is the job of whoever includes this header to wrap this in something
41 * to get an externally visible symbol.
42 *
43 * In order for the blorp_exec function to work, the driver must provide
44 * implementations of the following static helper functions.
45 */
46
47 static void *
48 blorp_emit_dwords(struct blorp_batch *batch, unsigned n);
49
50 static uint64_t
51 blorp_emit_reloc(struct blorp_batch *batch,
52 void *location, struct blorp_address address, uint32_t delta);
53
54 static void *
55 blorp_alloc_dynamic_state(struct blorp_batch *batch,
56 uint32_t size,
57 uint32_t alignment,
58 uint32_t *offset);
59 static void *
60 blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
61 struct blorp_address *addr);
62
63 #if GEN_GEN >= 8
64 static struct blorp_address
65 blorp_get_workaround_page(struct blorp_batch *batch);
66 #endif
67
68 static void
69 blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
70 unsigned state_size, unsigned state_alignment,
71 uint32_t *bt_offset, uint32_t *surface_offsets,
72 void **surface_maps);
73
74 static void
75 blorp_flush_range(struct blorp_batch *batch, void *start, size_t size);
76
77 static void
78 blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
79 struct blorp_address address, uint32_t delta);
80
81 #if GEN_GEN >= 7
82 static struct blorp_address
83 blorp_get_surface_base_address(struct blorp_batch *batch);
84 #endif
85
86 static void
87 blorp_emit_urb_config(struct blorp_batch *batch,
88 unsigned vs_entry_size, unsigned sf_entry_size);
89
90 static void
91 blorp_emit_pipeline(struct blorp_batch *batch,
92 const struct blorp_params *params);
93
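/* For illustration only: a hedged sketch (not part of this header or of any
 * real driver) of what one driver-provided hook and the externally visible
 * wrapper might look like.  The names my_batch, my_batch_alloc_dwords() and
 * my_driver_blorp_exec() are hypothetical.
 *
 *    static void *
 *    blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
 *    {
 *       struct my_batch *mb = batch->driver_batch;
 *       return my_batch_alloc_dwords(mb, n);
 *    }
 *
 *    void
 *    my_driver_blorp_exec(struct blorp_batch *batch,
 *                         const struct blorp_params *params)
 *    {
 *       blorp_exec(batch, params);
 *    }
 */
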
94 /***** BEGIN blorp_exec implementation ******/
95
96 static uint64_t
_blorp_combine_address(struct blorp_batch *batch, void *location,
                       struct blorp_address address, uint32_t delta)
99 {
100 if (address.buffer == NULL) {
101 return address.offset + delta;
102 } else {
103 return blorp_emit_reloc(batch, location, address, delta);
104 }
105 }
106
107 #define __gen_address_type struct blorp_address
108 #define __gen_user_data struct blorp_batch
109 #define __gen_combine_address _blorp_combine_address
110
111 #include "genxml/genX_pack.h"
112
113 #define _blorp_cmd_length(cmd) cmd ## _length
114 #define _blorp_cmd_length_bias(cmd) cmd ## _length_bias
115 #define _blorp_cmd_header(cmd) cmd ## _header
116 #define _blorp_cmd_pack(cmd) cmd ## _pack
117
118 #define blorp_emit(batch, cmd, name) \
119 for (struct cmd name = { _blorp_cmd_header(cmd) }, \
120 *_dst = blorp_emit_dwords(batch, _blorp_cmd_length(cmd)); \
121 __builtin_expect(_dst != NULL, 1); \
122 _blorp_cmd_pack(cmd)(batch, (void *)_dst, &name), \
123 _dst = NULL)
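
/* blorp_emit() declares a command struct on the stack, reserves space for it
 * in the batch, and packs whatever the body sets into that space when the
 * block completes.  A sketch of typical usage, mirroring the 3DSTATE_CLIP
 * emitted later in this file:
 *
 *    blorp_emit(batch, GENX(3DSTATE_CLIP), clip) {
 *       clip.PerspectiveDivideDisable = true;
 *    }
 */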
124
125 #define blorp_emitn(batch, cmd, n) ({ \
126 uint32_t *_dw = blorp_emit_dwords(batch, n); \
127 if (_dw) { \
128 struct cmd template = { \
129 _blorp_cmd_header(cmd), \
130 .DWordLength = n - _blorp_cmd_length_bias(cmd), \
131 }; \
132 _blorp_cmd_pack(cmd)(batch, _dw, &template); \
133 } \
134 _dw ? _dw + 1 : NULL; /* Array starts at dw[1] */ \
135 })
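
/* blorp_emitn() reserves n dwords for a variable-length command, packs the
 * header (including DWordLength), and returns a pointer to dw[1] so the
 * caller can fill in the payload.  A sketch of the pattern used for
 * 3DSTATE_VERTEX_BUFFERS later in this file:
 *
 *    const unsigned num_dwords = 1 + GENX(VERTEX_BUFFER_STATE_length) * 2;
 *    uint32_t *dw = blorp_emitn(batch, GENX(3DSTATE_VERTEX_BUFFERS), num_dwords);
 *    if (dw != NULL)
 *       GENX(VERTEX_BUFFER_STATE_pack)(batch, dw, &vb[0]);
 */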
136
137 #define STRUCT_ZERO(S) ({ struct S t; memset(&t, 0, sizeof(t)); t; })
138
139 #define blorp_emit_dynamic(batch, state, name, align, offset) \
140 for (struct state name = STRUCT_ZERO(state), \
141 *_dst = blorp_alloc_dynamic_state(batch, \
142 _blorp_cmd_length(state) * 4, \
143 align, offset); \
144 __builtin_expect(_dst != NULL, 1); \
145 _blorp_cmd_pack(state)(batch, (void *)_dst, &name), \
146 blorp_flush_range(batch, _dst, _blorp_cmd_length(state) * 4), \
147 _dst = NULL)
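
/* blorp_emit_dynamic() works like blorp_emit() except that the packed struct
 * goes into driver-allocated dynamic state rather than into the batch, and
 * its offset is returned through the last argument.  A sketch, mirroring
 * blorp_emit_cc_viewport() below:
 *
 *    uint32_t cc_vp_offset;
 *    blorp_emit_dynamic(batch, GENX(CC_VIEWPORT), vp, 32, &cc_vp_offset) {
 *       vp.MinimumDepth = 0.0;
 *       vp.MaximumDepth = 1.0;
 *    }
 */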
148
149 /* 3DSTATE_URB
150 * 3DSTATE_URB_VS
151 * 3DSTATE_URB_HS
152 * 3DSTATE_URB_DS
153 * 3DSTATE_URB_GS
154 *
 * Assign the entire URB to the VS. Even though the VS is disabled, URB space
 * is still needed because the clipper loads the VUEs from the URB. From
157 * the Sandybridge PRM, Volume 2, Part 1, Section 3DSTATE,
158 * Dword 1.15:0 "VS Number of URB Entries":
159 * This field is always used (even if VS Function Enable is DISABLED).
160 *
161 * The warning below appears in the PRM (Section 3DSTATE_URB), but we can
162 * safely ignore it because this batch contains only one draw call.
163 * Because of URB corruption caused by allocating a previous GS unit
164 * URB entry to the VS unit, software is required to send a “GS NULL
165 * Fence” (Send URB fence with VS URB size == 1 and GS URB size == 0)
166 * plus a dummy DRAW call before any case where VS will be taking over
167 * GS URB space.
168 *
 * If the 3DSTATE_URB_VS is emitted, then the others must be emitted as well.
170 * From the Ivybridge PRM, Volume 2 Part 1, section 1.7.1 3DSTATE_URB_VS:
171 *
172 * 3DSTATE_URB_HS, 3DSTATE_URB_DS, and 3DSTATE_URB_GS must also be
173 * programmed in order for the programming of this state to be
174 * valid.
175 */
176 static void
emit_urb_config(struct blorp_batch *batch,
                const struct blorp_params *params)
179 {
180 /* Once vertex fetcher has written full VUE entries with complete
181 * header the space requirement is as follows per vertex (in bytes):
182 *
183 * Header Position Program constants
184 * +--------+------------+-------------------+
185 * | 16 | 16 | n x 16 |
186 * +--------+------------+-------------------+
187 *
188 * where 'n' stands for number of varying inputs expressed as vec4s.
189 */
190 const unsigned num_varyings =
191 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
192 const unsigned total_needed = 16 + 16 + num_varyings * 16;
193
194 /* The URB size is expressed in units of 64 bytes (512 bits) */
195 const unsigned vs_entry_size = DIV_ROUND_UP(total_needed, 64);
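   /* For example, a blit with two varying vec4s needs 16 + 16 + 2 * 16 = 64
    * bytes per vertex, so vs_entry_size comes out to a single 64-byte unit.
    */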
196
197 const unsigned sf_entry_size =
198 params->sf_prog_data ? params->sf_prog_data->urb_entry_size : 0;
199
200 blorp_emit_urb_config(batch, vs_entry_size, sf_entry_size);
201 }
202
203 static void
blorp_emit_vertex_data(struct blorp_batch *batch,
                       const struct blorp_params *params,
                       struct blorp_address *addr,
                       uint32_t *size)
208 {
209 const float vertices[] = {
210 /* v0 */ (float)params->x1, (float)params->y1, params->z,
211 /* v1 */ (float)params->x0, (float)params->y1, params->z,
212 /* v2 */ (float)params->x0, (float)params->y0, params->z,
213 };
214
215 void *data = blorp_alloc_vertex_buffer(batch, sizeof(vertices), addr);
216 memcpy(data, vertices, sizeof(vertices));
217 *size = sizeof(vertices);
218 blorp_flush_range(batch, data, *size);
219 }
220
221 static void
blorp_emit_input_varying_data(struct blorp_batch *batch,
                              const struct blorp_params *params,
                              struct blorp_address *addr,
                              uint32_t *size)
226 {
227 const unsigned vec4_size_in_bytes = 4 * sizeof(float);
228 const unsigned max_num_varyings =
229 DIV_ROUND_UP(sizeof(params->wm_inputs), vec4_size_in_bytes);
230 const unsigned num_varyings =
231 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
232
233 *size = 16 + num_varyings * vec4_size_in_bytes;
234
235 const uint32_t *const inputs_src = (const uint32_t *)¶ms->wm_inputs;
236 void *data = blorp_alloc_vertex_buffer(batch, *size, addr);
237 uint32_t *inputs = data;
238
239 /* Copy in the VS inputs */
240 assert(sizeof(params->vs_inputs) == 16);
241 memcpy(inputs, ¶ms->vs_inputs, sizeof(params->vs_inputs));
242 inputs += 4;
243
244 if (params->wm_prog_data) {
245 /* Walk over the attribute slots, determine if the attribute is used by
246 * the program and when necessary copy the values from the input storage
247 * to the vertex data buffer.
248 */
249 for (unsigned i = 0; i < max_num_varyings; i++) {
250 const gl_varying_slot attr = VARYING_SLOT_VAR0 + i;
251
252 const int input_index = params->wm_prog_data->urb_setup[attr];
253 if (input_index < 0)
254 continue;
255
256 memcpy(inputs, inputs_src + i * 4, vec4_size_in_bytes);
257
258 inputs += 4;
259 }
260 }
261
262 blorp_flush_range(batch, data, *size);
263 }
264
265 static void
blorp_emit_vertex_buffers(struct blorp_batch *batch,
                          const struct blorp_params *params)
268 {
269 struct GENX(VERTEX_BUFFER_STATE) vb[2];
270 memset(vb, 0, sizeof(vb));
271
272 uint32_t size;
273 blorp_emit_vertex_data(batch, params, &vb[0].BufferStartingAddress, &size);
274 vb[0].VertexBufferIndex = 0;
275 vb[0].BufferPitch = 3 * sizeof(float);
276 #if GEN_GEN >= 6
277 vb[0].VertexBufferMOCS = vb[0].BufferStartingAddress.mocs;
278 #endif
279 #if GEN_GEN >= 7
280 vb[0].AddressModifyEnable = true;
281 #endif
282 #if GEN_GEN >= 8
283 vb[0].BufferSize = size;
284 #elif GEN_GEN >= 5
285 vb[0].BufferAccessType = VERTEXDATA;
286 vb[0].EndAddress = vb[0].BufferStartingAddress;
287 vb[0].EndAddress.offset += size - 1;
288 #elif GEN_GEN == 4
289 vb[0].BufferAccessType = VERTEXDATA;
290 vb[0].MaxIndex = 2;
291 #endif
292
293 blorp_emit_input_varying_data(batch, params,
294 &vb[1].BufferStartingAddress, &size);
295 vb[1].VertexBufferIndex = 1;
296 vb[1].BufferPitch = 0;
297 #if GEN_GEN >= 6
298 vb[1].VertexBufferMOCS = vb[1].BufferStartingAddress.mocs;
299 #endif
300 #if GEN_GEN >= 7
301 vb[1].AddressModifyEnable = true;
302 #endif
303 #if GEN_GEN >= 8
304 vb[1].BufferSize = size;
305 #elif GEN_GEN >= 5
306 vb[1].BufferAccessType = INSTANCEDATA;
307 vb[1].EndAddress = vb[1].BufferStartingAddress;
308 vb[1].EndAddress.offset += size - 1;
309 #elif GEN_GEN == 4
310 vb[1].BufferAccessType = INSTANCEDATA;
311 vb[1].MaxIndex = 0;
312 #endif
313
314 const unsigned num_dwords = 1 + GENX(VERTEX_BUFFER_STATE_length) * 2;
315 uint32_t *dw = blorp_emitn(batch, GENX(3DSTATE_VERTEX_BUFFERS), num_dwords);
316 if (!dw)
317 return;
318
319 for (unsigned i = 0; i < 2; i++) {
320 GENX(VERTEX_BUFFER_STATE_pack)(batch, dw, &vb[i]);
321 dw += GENX(VERTEX_BUFFER_STATE_length);
322 }
323 }
324
325 static void
blorp_emit_vertex_elements(struct blorp_batch *batch,
                           const struct blorp_params *params)
328 {
329 const unsigned num_varyings =
330 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
331 bool need_ndc = batch->blorp->compiler->devinfo->gen <= 5;
332 const unsigned num_elements = 2 + need_ndc + num_varyings;
333
334 struct GENX(VERTEX_ELEMENT_STATE) ve[num_elements];
335 memset(ve, 0, num_elements * sizeof(*ve));
336
   /* Set up the VBO for the rectangle primitive.
338 *
339 * A rectangle primitive (3DPRIM_RECTLIST) consists of only three
340 * vertices. The vertices reside in screen space with DirectX
341 * coordinates (that is, (0, 0) is the upper left corner).
342 *
343 * v2 ------ implied
344 * | |
345 * | |
346 * v1 ----- v0
347 *
348 * Since the VS is disabled, the clipper loads each VUE directly from
349 * the URB. This is controlled by the 3DSTATE_VERTEX_BUFFERS and
350 * 3DSTATE_VERTEX_ELEMENTS packets below. The VUE contents are as follows:
351 * dw0: Reserved, MBZ.
    *   dw1: Render Target Array Index. Below, the vertex fetcher is
    *        programmed to fill this with the primitive instance identifier,
    *        which is used for layered clears. All other renders have only
    *        one instance and therefore the value is effectively zero.
356 * dw2: Viewport Index. The HiZ op disables viewport mapping and
357 * scissoring, so set the dword to 0.
358 * dw3: Point Width: The HiZ op does not emit the POINTLIST primitive,
359 * so set the dword to 0.
360 * dw4: Vertex Position X.
361 * dw5: Vertex Position Y.
362 * dw6: Vertex Position Z.
363 * dw7: Vertex Position W.
364 *
365 * dw8: Flat vertex input 0
366 * dw9: Flat vertex input 1
367 * ...
368 * dwn: Flat vertex input n - 8
369 *
370 * For details, see the Sandybridge PRM, Volume 2, Part 1, Section 1.5.1
371 * "Vertex URB Entry (VUE) Formats".
372 *
    * Only the vertex position X and Y are variable; Z is fixed to zero and
    * W to one. Header words dw0, dw2 and dw3 are zero. There is no need to
    * include the fixed values in the vertex buffer; the vertex fetcher can
    * be instructed to fill vertex elements with constant values of one and
    * zero instead of reading them from the buffer.
    * Flat inputs are program constants that are not interpolated; their
    * values are the same for every vertex.
380 *
381 * See the vertex element setup below.
382 */
383 unsigned slot = 0;
384
385 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
386 .VertexBufferIndex = 1,
387 .Valid = true,
388 .SourceElementFormat = (enum GENX(SURFACE_FORMAT)) ISL_FORMAT_R32G32B32A32_FLOAT,
389 .SourceElementOffset = 0,
390 .Component0Control = VFCOMP_STORE_SRC,
391
      /* From Gen8 onwards the hardware is no longer instructed to overwrite
       * components using an element specifier. Instead, there is a separate
       * 3DSTATE_VF_SGVS (System Generated Value Setup) state packet for it.
       */
396 #if GEN_GEN >= 8
397 .Component1Control = VFCOMP_STORE_0,
398 #elif GEN_GEN >= 5
399 .Component1Control = VFCOMP_STORE_IID,
400 #else
401 .Component1Control = VFCOMP_STORE_0,
402 #endif
403 .Component2Control = VFCOMP_STORE_0,
404 .Component3Control = VFCOMP_STORE_0,
405 #if GEN_GEN <= 5
406 .DestinationElementOffset = slot * 4,
407 #endif
408 };
409 slot++;
410
411 #if GEN_GEN <= 5
   /* On Ironlake and earlier, a native device coordinates version of the
    * position goes right after the normal VUE header and before the
    * position itself. Since w == 1 for all of our coordinates, this is just
    * a copy of the position.
416 */
417 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
418 .VertexBufferIndex = 0,
419 .Valid = true,
420 .SourceElementFormat = (enum GENX(SURFACE_FORMAT)) ISL_FORMAT_R32G32B32_FLOAT,
421 .SourceElementOffset = 0,
422 .Component0Control = VFCOMP_STORE_SRC,
423 .Component1Control = VFCOMP_STORE_SRC,
424 .Component2Control = VFCOMP_STORE_SRC,
425 .Component3Control = VFCOMP_STORE_1_FP,
426 .DestinationElementOffset = slot * 4,
427 };
428 slot++;
429 #endif
430
431 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
432 .VertexBufferIndex = 0,
433 .Valid = true,
434 .SourceElementFormat = (enum GENX(SURFACE_FORMAT)) ISL_FORMAT_R32G32B32_FLOAT,
435 .SourceElementOffset = 0,
436 .Component0Control = VFCOMP_STORE_SRC,
437 .Component1Control = VFCOMP_STORE_SRC,
438 .Component2Control = VFCOMP_STORE_SRC,
439 .Component3Control = VFCOMP_STORE_1_FP,
440 #if GEN_GEN <= 5
441 .DestinationElementOffset = slot * 4,
442 #endif
443 };
444 slot++;
445
446 for (unsigned i = 0; i < num_varyings; ++i) {
447 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
448 .VertexBufferIndex = 1,
449 .Valid = true,
450 .SourceElementFormat = (enum GENX(SURFACE_FORMAT)) ISL_FORMAT_R32G32B32A32_FLOAT,
451 .SourceElementOffset = 16 + i * 4 * sizeof(float),
452 .Component0Control = VFCOMP_STORE_SRC,
453 .Component1Control = VFCOMP_STORE_SRC,
454 .Component2Control = VFCOMP_STORE_SRC,
455 .Component3Control = VFCOMP_STORE_SRC,
456 #if GEN_GEN <= 5
457 .DestinationElementOffset = slot * 4,
458 #endif
459 };
460 slot++;
461 }
462
463 const unsigned num_dwords =
464 1 + GENX(VERTEX_ELEMENT_STATE_length) * num_elements;
465 uint32_t *dw = blorp_emitn(batch, GENX(3DSTATE_VERTEX_ELEMENTS), num_dwords);
466 if (!dw)
467 return;
468
469 for (unsigned i = 0; i < num_elements; i++) {
470 GENX(VERTEX_ELEMENT_STATE_pack)(batch, dw, &ve[i]);
471 dw += GENX(VERTEX_ELEMENT_STATE_length);
472 }
473
474 #if GEN_GEN >= 8
475 /* Overwrite Render Target Array Index (2nd dword) in the VUE header with
476 * primitive instance identifier. This is used for layered clears.
477 */
478 blorp_emit(batch, GENX(3DSTATE_VF_SGVS), sgvs) {
479 sgvs.InstanceIDEnable = true;
480 sgvs.InstanceIDComponentNumber = COMP_1;
481 sgvs.InstanceIDElementOffset = 0;
482 }
483
484 for (unsigned i = 0; i < num_elements; i++) {
485 blorp_emit(batch, GENX(3DSTATE_VF_INSTANCING), vf) {
486 vf.VertexElementIndex = i;
487 vf.InstancingEnable = false;
488 }
489 }
490
491 blorp_emit(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
492 topo.PrimitiveTopologyType = _3DPRIM_RECTLIST;
493 }
494 #endif
495 }
496
497 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
498 static uint32_t
blorp_emit_cc_viewport(struct blorp_batch *batch,
                       const struct blorp_params *params)
501 {
502 uint32_t cc_vp_offset;
503 blorp_emit_dynamic(batch, GENX(CC_VIEWPORT), vp, 32, &cc_vp_offset) {
504 vp.MinimumDepth = 0.0;
505 vp.MaximumDepth = 1.0;
506 }
507
508 #if GEN_GEN >= 7
509 blorp_emit(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), vsp) {
510 vsp.CCViewportPointer = cc_vp_offset;
511 }
512 #elif GEN_GEN == 6
513 blorp_emit(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vsp) {
514 vsp.CCViewportStateChange = true;
515 vsp.PointertoCC_VIEWPORT = cc_vp_offset;
516 }
517 #endif
518
519 return cc_vp_offset;
520 }
521
522 static uint32_t
blorp_emit_sampler_state(struct blorp_batch *batch,
                         const struct blorp_params *params)
525 {
526 uint32_t offset;
527 blorp_emit_dynamic(batch, GENX(SAMPLER_STATE), sampler, 32, &offset) {
528 sampler.MipModeFilter = MIPFILTER_NONE;
529 sampler.MagModeFilter = MAPFILTER_LINEAR;
530 sampler.MinModeFilter = MAPFILTER_LINEAR;
531 sampler.MinLOD = 0;
532 sampler.MaxLOD = 0;
533 sampler.TCXAddressControlMode = TCM_CLAMP;
534 sampler.TCYAddressControlMode = TCM_CLAMP;
535 sampler.TCZAddressControlMode = TCM_CLAMP;
536 sampler.MaximumAnisotropy = RATIO21;
537 sampler.RAddressMinFilterRoundingEnable = true;
538 sampler.RAddressMagFilterRoundingEnable = true;
539 sampler.VAddressMinFilterRoundingEnable = true;
540 sampler.VAddressMagFilterRoundingEnable = true;
541 sampler.UAddressMinFilterRoundingEnable = true;
542 sampler.UAddressMagFilterRoundingEnable = true;
543 #if GEN_GEN > 6
544 sampler.NonnormalizedCoordinateEnable = true;
545 #endif
546 }
547
548 #if GEN_GEN >= 7
549 blorp_emit(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_PS), ssp) {
550 ssp.PointertoPSSamplerState = offset;
551 }
552 #elif GEN_GEN == 6
553 blorp_emit(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS), ssp) {
554 ssp.VSSamplerStateChange = true;
555 ssp.GSSamplerStateChange = true;
556 ssp.PSSamplerStateChange = true;
557 ssp.PointertoPSSamplerState = offset;
558 }
559 #endif
560
561 return offset;
562 }
563
564 /* What follows is the code for setting up a "pipeline" on Sandy Bridge and
565 * later hardware. This file will be included by i965 for gen4-5 as well, so
566 * this code is guarded by GEN_GEN >= 6.
567 */
568 #if GEN_GEN >= 6
569
570 static void
blorp_emit_vs_config(struct blorp_batch *batch,
                     const struct blorp_params *params)
573 {
574 struct brw_vs_prog_data *vs_prog_data = params->vs_prog_data;
575
576 blorp_emit(batch, GENX(3DSTATE_VS), vs) {
577 if (vs_prog_data) {
578 vs.Enable = true;
579
580 vs.KernelStartPointer = params->vs_prog_kernel;
581
582 vs.DispatchGRFStartRegisterForURBData =
583 vs_prog_data->base.base.dispatch_grf_start_reg;
584 vs.VertexURBEntryReadLength =
585 vs_prog_data->base.urb_read_length;
586 vs.VertexURBEntryReadOffset = 0;
587
588 vs.MaximumNumberofThreads =
589 batch->blorp->isl_dev->info->max_vs_threads - 1;
590
591 #if GEN_GEN >= 8
592 vs.SIMD8DispatchEnable =
593 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8;
594 #endif
595 }
596 }
597 }
598
599 static void
blorp_emit_sf_config(struct blorp_batch *batch,
                     const struct blorp_params *params)
602 {
603 const struct brw_wm_prog_data *prog_data = params->wm_prog_data;
604
605 /* 3DSTATE_SF
606 *
607 * Disable ViewportTransformEnable (dw2.1)
608 *
609 * From the SandyBridge PRM, Volume 2, Part 1, Section 1.3, "3D
610 * Primitives Overview":
611 * RECTLIST: Viewport Mapping must be DISABLED (as is typical with the
    *       use of screen-space coordinates).
613 *
614 * A solid rectangle must be rendered, so set FrontFaceFillMode (dw2.4:3)
615 * and BackFaceFillMode (dw2.5:6) to SOLID(0).
616 *
617 * From the Sandy Bridge PRM, Volume 2, Part 1, Section
618 * 6.4.1.1 3DSTATE_SF, Field FrontFaceFillMode:
619 * SOLID: Any triangle or rectangle object found to be front-facing
620 * is rendered as a solid object. This setting is required when
    *     rendering rectangle (RECTLIST) objects.
622 */
623
624 #if GEN_GEN >= 8
625
626 blorp_emit(batch, GENX(3DSTATE_SF), sf);
627
628 blorp_emit(batch, GENX(3DSTATE_RASTER), raster) {
629 raster.CullMode = CULLMODE_NONE;
630 }
631
632 blorp_emit(batch, GENX(3DSTATE_SBE), sbe) {
633 sbe.VertexURBEntryReadOffset = 1;
634 if (prog_data) {
635 sbe.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
636 sbe.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
637 sbe.ConstantInterpolationEnable = prog_data->flat_inputs;
638 } else {
639 sbe.NumberofSFOutputAttributes = 0;
640 sbe.VertexURBEntryReadLength = 1;
641 }
642 sbe.ForceVertexURBEntryReadLength = true;
643 sbe.ForceVertexURBEntryReadOffset = true;
644
645 #if GEN_GEN >= 9
646 for (unsigned i = 0; i < 32; i++)
647 sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
648 #endif
649 }
650
651 #elif GEN_GEN >= 7
652
653 blorp_emit(batch, GENX(3DSTATE_SF), sf) {
654 sf.FrontFaceFillMode = FILL_MODE_SOLID;
655 sf.BackFaceFillMode = FILL_MODE_SOLID;
656
657 sf.MultisampleRasterizationMode = params->num_samples > 1 ?
658 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
659
660 #if GEN_GEN == 7
661 sf.DepthBufferSurfaceFormat = params->depth_format;
662 #endif
663 }
664
665 blorp_emit(batch, GENX(3DSTATE_SBE), sbe) {
666 sbe.VertexURBEntryReadOffset = 1;
667 if (prog_data) {
668 sbe.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
669 sbe.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
670 sbe.ConstantInterpolationEnable = prog_data->flat_inputs;
671 } else {
672 sbe.NumberofSFOutputAttributes = 0;
673 sbe.VertexURBEntryReadLength = 1;
674 }
675 }
676
677 #else /* GEN_GEN <= 6 */
678
679 blorp_emit(batch, GENX(3DSTATE_SF), sf) {
680 sf.FrontFaceFillMode = FILL_MODE_SOLID;
681 sf.BackFaceFillMode = FILL_MODE_SOLID;
682
683 sf.MultisampleRasterizationMode = params->num_samples > 1 ?
684 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
685
686 sf.VertexURBEntryReadOffset = 1;
687 if (prog_data) {
688 sf.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
689 sf.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
690 sf.ConstantInterpolationEnable = prog_data->flat_inputs;
691 } else {
692 sf.NumberofSFOutputAttributes = 0;
693 sf.VertexURBEntryReadLength = 1;
694 }
695 }
696
697 #endif /* GEN_GEN */
698 }
699
700 static void
blorp_emit_ps_config(struct blorp_batch *batch,
                     const struct blorp_params *params)
703 {
704 const struct brw_wm_prog_data *prog_data = params->wm_prog_data;
705
706 /* Even when thread dispatch is disabled, max threads (dw5.25:31) must be
707 * nonzero to prevent the GPU from hanging. While the documentation doesn't
708 * mention this explicitly, it notes that the valid range for the field is
709 * [1,39] = [2,40] threads, which excludes zero.
710 *
711 * To be safe (and to minimize extraneous code) we go ahead and fully
712 * configure the WM state whether or not there is a WM program.
713 */
714
715 #if GEN_GEN >= 8
716
717 blorp_emit(batch, GENX(3DSTATE_WM), wm);
718
719 blorp_emit(batch, GENX(3DSTATE_PS), ps) {
720 if (params->src.enabled) {
721 ps.SamplerCount = 1; /* Up to 4 samplers */
722 ps.BindingTableEntryCount = 2;
723 } else {
724 ps.BindingTableEntryCount = 1;
725 }
726
727 if (prog_data) {
728 ps.DispatchGRFStartRegisterForConstantSetupData0 =
729 prog_data->base.dispatch_grf_start_reg;
730 ps.DispatchGRFStartRegisterForConstantSetupData2 =
731 prog_data->dispatch_grf_start_reg_2;
732
733 ps._8PixelDispatchEnable = prog_data->dispatch_8;
734 ps._16PixelDispatchEnable = prog_data->dispatch_16;
735
736 ps.KernelStartPointer0 = params->wm_prog_kernel;
737 ps.KernelStartPointer2 =
738 params->wm_prog_kernel + prog_data->prog_offset_2;
739 }
740
741 /* 3DSTATE_PS expects the number of threads per PSD, which is always 64;
742 * it implicitly scales for different GT levels (which have some # of
743 * PSDs).
744 *
745 * In Gen8 the format is U8-2 whereas in Gen9 it is U8-1.
746 */
747 if (GEN_GEN >= 9)
748 ps.MaximumNumberofThreadsPerPSD = 64 - 1;
749 else
750 ps.MaximumNumberofThreadsPerPSD = 64 - 2;
751
752 switch (params->fast_clear_op) {
753 case BLORP_FAST_CLEAR_OP_NONE:
754 break;
755 #if GEN_GEN >= 9
756 case BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL:
757 ps.RenderTargetResolveType = RESOLVE_PARTIAL;
758 break;
759 case BLORP_FAST_CLEAR_OP_RESOLVE_FULL:
760 ps.RenderTargetResolveType = RESOLVE_FULL;
761 break;
762 #else
763 case BLORP_FAST_CLEAR_OP_RESOLVE_FULL:
764 ps.RenderTargetResolveEnable = true;
765 break;
766 #endif
767 case BLORP_FAST_CLEAR_OP_CLEAR:
768 ps.RenderTargetFastClearEnable = true;
769 break;
770 default:
771 unreachable("Invalid fast clear op");
772 }
773 }
774
775 blorp_emit(batch, GENX(3DSTATE_PS_EXTRA), psx) {
776 if (prog_data) {
777 psx.PixelShaderValid = true;
778 psx.AttributeEnable = prog_data->num_varying_inputs > 0;
779 psx.PixelShaderIsPerSample = prog_data->persample_dispatch;
780 }
781
782 if (params->src.enabled)
783 psx.PixelShaderKillsPixel = true;
784 }
785
786 #elif GEN_GEN >= 7
787
788 blorp_emit(batch, GENX(3DSTATE_WM), wm) {
789 switch (params->hiz_op) {
790 case BLORP_HIZ_OP_DEPTH_CLEAR:
791 wm.DepthBufferClear = true;
792 break;
793 case BLORP_HIZ_OP_DEPTH_RESOLVE:
794 wm.DepthBufferResolveEnable = true;
795 break;
796 case BLORP_HIZ_OP_HIZ_RESOLVE:
797 wm.HierarchicalDepthBufferResolveEnable = true;
798 break;
799 case BLORP_HIZ_OP_NONE:
800 break;
801 default:
802 unreachable("not reached");
803 }
804
805 if (prog_data)
806 wm.ThreadDispatchEnable = true;
807
808 if (params->src.enabled)
809 wm.PixelShaderKillsPixel = true;
810
811 if (params->num_samples > 1) {
812 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
813 wm.MultisampleDispatchMode =
814 (prog_data && prog_data->persample_dispatch) ?
815 MSDISPMODE_PERSAMPLE : MSDISPMODE_PERPIXEL;
816 } else {
817 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
818 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
819 }
820 }
821
822 blorp_emit(batch, GENX(3DSTATE_PS), ps) {
823 ps.MaximumNumberofThreads =
824 batch->blorp->isl_dev->info->max_wm_threads - 1;
825
826 #if GEN_IS_HASWELL
827 ps.SampleMask = 1;
828 #endif
829
830 if (prog_data) {
831 ps.DispatchGRFStartRegisterForConstantSetupData0 =
832 prog_data->base.dispatch_grf_start_reg;
833 ps.DispatchGRFStartRegisterForConstantSetupData2 =
834 prog_data->dispatch_grf_start_reg_2;
835
836 ps.KernelStartPointer0 = params->wm_prog_kernel;
837 ps.KernelStartPointer2 =
838 params->wm_prog_kernel + prog_data->prog_offset_2;
839
840 ps._8PixelDispatchEnable = prog_data->dispatch_8;
841 ps._16PixelDispatchEnable = prog_data->dispatch_16;
842
843 ps.AttributeEnable = prog_data->num_varying_inputs > 0;
844 } else {
845 /* Gen7 hardware gets angry if we don't enable at least one dispatch
846 * mode, so just enable 16-pixel dispatch if we don't have a program.
847 */
848 ps._16PixelDispatchEnable = true;
849 }
850
851 if (params->src.enabled)
852 ps.SamplerCount = 1; /* Up to 4 samplers */
853
854 switch (params->fast_clear_op) {
855 case BLORP_FAST_CLEAR_OP_NONE:
856 break;
857 case BLORP_FAST_CLEAR_OP_RESOLVE_FULL:
858 ps.RenderTargetResolveEnable = true;
859 break;
860 case BLORP_FAST_CLEAR_OP_CLEAR:
861 ps.RenderTargetFastClearEnable = true;
862 break;
863 default:
864 unreachable("Invalid fast clear op");
865 }
866 }
867
868 #else /* GEN_GEN <= 6 */
869
870 blorp_emit(batch, GENX(3DSTATE_WM), wm) {
871 wm.MaximumNumberofThreads =
872 batch->blorp->isl_dev->info->max_wm_threads - 1;
873
874 switch (params->hiz_op) {
875 case BLORP_HIZ_OP_DEPTH_CLEAR:
876 wm.DepthBufferClear = true;
877 break;
878 case BLORP_HIZ_OP_DEPTH_RESOLVE:
879 wm.DepthBufferResolveEnable = true;
880 break;
881 case BLORP_HIZ_OP_HIZ_RESOLVE:
882 wm.HierarchicalDepthBufferResolveEnable = true;
883 break;
884 case BLORP_HIZ_OP_NONE:
885 break;
886 default:
887 unreachable("not reached");
888 }
889
890 if (prog_data) {
891 wm.ThreadDispatchEnable = true;
892
893 wm.DispatchGRFStartRegisterForConstantSetupData0 =
894 prog_data->base.dispatch_grf_start_reg;
895 wm.DispatchGRFStartRegisterForConstantSetupData2 =
896 prog_data->dispatch_grf_start_reg_2;
897
898 wm.KernelStartPointer0 = params->wm_prog_kernel;
899 wm.KernelStartPointer2 =
900 params->wm_prog_kernel + prog_data->prog_offset_2;
901
902 wm._8PixelDispatchEnable = prog_data->dispatch_8;
903 wm._16PixelDispatchEnable = prog_data->dispatch_16;
904
905 wm.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
906 }
907
908 if (params->src.enabled) {
909 wm.SamplerCount = 1; /* Up to 4 samplers */
910 wm.PixelShaderKillsPixel = true; /* TODO: temporarily smash on */
911 }
912
913 if (params->num_samples > 1) {
914 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
915 wm.MultisampleDispatchMode =
916 (prog_data && prog_data->persample_dispatch) ?
917 MSDISPMODE_PERSAMPLE : MSDISPMODE_PERPIXEL;
918 } else {
919 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
920 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
921 }
922 }
923
924 #endif /* GEN_GEN */
925 }
926
927 static uint32_t
blorp_emit_blend_state(struct blorp_batch *batch,
                       const struct blorp_params *params)
930 {
931 struct GENX(BLEND_STATE) blend;
932 memset(&blend, 0, sizeof(blend));
933
934 uint32_t offset;
935 int size = GENX(BLEND_STATE_length) * 4;
936 size += GENX(BLEND_STATE_ENTRY_length) * 4 * params->num_draw_buffers;
937 uint32_t *state = blorp_alloc_dynamic_state(batch, size, 64, &offset);
938 uint32_t *pos = state;
939
940 GENX(BLEND_STATE_pack)(NULL, pos, &blend);
941 pos += GENX(BLEND_STATE_length);
942
943 for (unsigned i = 0; i < params->num_draw_buffers; ++i) {
944 struct GENX(BLEND_STATE_ENTRY) entry = {
945 .PreBlendColorClampEnable = true,
946 .PostBlendColorClampEnable = true,
947 .ColorClampRange = COLORCLAMP_RTFORMAT,
948
949 .WriteDisableRed = params->color_write_disable[0],
950 .WriteDisableGreen = params->color_write_disable[1],
951 .WriteDisableBlue = params->color_write_disable[2],
952 .WriteDisableAlpha = params->color_write_disable[3],
953 };
954 GENX(BLEND_STATE_ENTRY_pack)(NULL, pos, &entry);
955 pos += GENX(BLEND_STATE_ENTRY_length);
956 }
957
958 blorp_flush_range(batch, state, size);
959
960 #if GEN_GEN >= 7
961 blorp_emit(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), sp) {
962 sp.BlendStatePointer = offset;
963 #if GEN_GEN >= 8
964 sp.BlendStatePointerValid = true;
965 #endif
966 }
967 #endif
968
969 #if GEN_GEN >= 8
970 blorp_emit(batch, GENX(3DSTATE_PS_BLEND), ps_blend) {
971 ps_blend.HasWriteableRT = true;
972 }
973 #endif
974
975 return offset;
976 }
977
978 static uint32_t
blorp_emit_color_calc_state(struct blorp_batch *batch,
                            const struct blorp_params *params)
981 {
982 uint32_t offset;
983 blorp_emit_dynamic(batch, GENX(COLOR_CALC_STATE), cc, 64, &offset) {
984 #if GEN_GEN <= 8
985 cc.StencilReferenceValue = params->stencil_ref;
986 #endif
987 }
988
989 #if GEN_GEN >= 7
990 blorp_emit(batch, GENX(3DSTATE_CC_STATE_POINTERS), sp) {
991 sp.ColorCalcStatePointer = offset;
992 #if GEN_GEN >= 8
993 sp.ColorCalcStatePointerValid = true;
994 #endif
995 }
996 #endif
997
998 return offset;
999 }
1000
1001 static uint32_t
blorp_emit_depth_stencil_state(struct blorp_batch *batch,
                               const struct blorp_params *params)
1004 {
1005 #if GEN_GEN >= 8
1006 struct GENX(3DSTATE_WM_DEPTH_STENCIL) ds = {
1007 GENX(3DSTATE_WM_DEPTH_STENCIL_header),
1008 };
1009 #else
1010 struct GENX(DEPTH_STENCIL_STATE) ds = { 0 };
1011 #endif
1012
1013 if (params->depth.enabled) {
1014 ds.DepthBufferWriteEnable = true;
1015
1016 switch (params->hiz_op) {
1017 case BLORP_HIZ_OP_NONE:
1018 ds.DepthTestEnable = true;
1019 ds.DepthTestFunction = COMPAREFUNCTION_ALWAYS;
1020 break;
1021
1022 /* See the following sections of the Sandy Bridge PRM, Volume 2, Part1:
1023 * - 7.5.3.1 Depth Buffer Clear
1024 * - 7.5.3.2 Depth Buffer Resolve
1025 * - 7.5.3.3 Hierarchical Depth Buffer Resolve
1026 */
1027 case BLORP_HIZ_OP_DEPTH_RESOLVE:
1028 ds.DepthTestEnable = true;
1029 ds.DepthTestFunction = COMPAREFUNCTION_NEVER;
1030 break;
1031
1032 case BLORP_HIZ_OP_DEPTH_CLEAR:
1033 case BLORP_HIZ_OP_HIZ_RESOLVE:
1034 ds.DepthTestEnable = false;
1035 break;
1036 }
1037 }
1038
1039 if (params->stencil.enabled) {
1040 ds.StencilBufferWriteEnable = true;
1041 ds.StencilTestEnable = true;
1042 ds.DoubleSidedStencilEnable = false;
1043
1044 ds.StencilTestFunction = COMPAREFUNCTION_ALWAYS;
1045 ds.StencilPassDepthPassOp = STENCILOP_REPLACE;
1046
1047 ds.StencilWriteMask = params->stencil_mask;
1048 #if GEN_GEN >= 9
1049 ds.StencilReferenceValue = params->stencil_ref;
1050 #endif
1051 }
1052
1053 #if GEN_GEN >= 8
1054 uint32_t offset = 0;
1055 uint32_t *dw = blorp_emit_dwords(batch,
1056 GENX(3DSTATE_WM_DEPTH_STENCIL_length));
1057 if (!dw)
1058 return 0;
1059
1060 GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, dw, &ds);
1061 #else
1062 uint32_t offset;
1063 void *state = blorp_alloc_dynamic_state(batch,
1064 GENX(DEPTH_STENCIL_STATE_length) * 4,
1065 64, &offset);
1066 GENX(DEPTH_STENCIL_STATE_pack)(NULL, state, &ds);
1067 blorp_flush_range(batch, state, GENX(DEPTH_STENCIL_STATE_length) * 4);
1068 #endif
1069
1070 #if GEN_GEN == 7
1071 blorp_emit(batch, GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), sp) {
1072 sp.PointertoDEPTH_STENCIL_STATE = offset;
1073 }
1074 #endif
1075
1076 return offset;
1077 }
1078
1079 static void
blorp_emit_3dstate_multisample(struct blorp_batch *batch,
                               const struct blorp_params *params)
1082 {
1083 blorp_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
1084 ms.NumberofMultisamples = __builtin_ffs(params->num_samples) - 1;
1085
1086 #if GEN_GEN >= 8
1087 /* The PRM says that this bit is valid only for DX9:
1088 *
1089 * SW can choose to set this bit only for DX9 API. DX10/OGL API's
1090 * should not have any effect by setting or not setting this bit.
1091 */
1092 ms.PixelPositionOffsetEnable = false;
1093 #elif GEN_GEN >= 7
1094
1095 switch (params->num_samples) {
1096 case 1:
1097 GEN_SAMPLE_POS_1X(ms.Sample);
1098 break;
1099 case 2:
1100 GEN_SAMPLE_POS_2X(ms.Sample);
1101 break;
1102 case 4:
1103 GEN_SAMPLE_POS_4X(ms.Sample);
1104 break;
1105 case 8:
1106 GEN_SAMPLE_POS_8X(ms.Sample);
1107 break;
1108 default:
1109 break;
1110 }
1111 #else
1112 GEN_SAMPLE_POS_4X(ms.Sample);
1113 #endif
1114 ms.PixelLocation = CENTER;
1115 }
1116 }
1117
1118 static void
blorp_emit_pipeline(struct blorp_batch *batch,
                    const struct blorp_params *params)
1121 {
1122 uint32_t blend_state_offset = 0;
1123 uint32_t color_calc_state_offset;
1124 uint32_t depth_stencil_state_offset;
1125
1126 emit_urb_config(batch, params);
1127
1128 if (params->wm_prog_data) {
1129 blend_state_offset = blorp_emit_blend_state(batch, params);
1130 }
1131 color_calc_state_offset = blorp_emit_color_calc_state(batch, params);
1132 depth_stencil_state_offset = blorp_emit_depth_stencil_state(batch, params);
1133
1134 #if GEN_GEN == 6
1135 /* 3DSTATE_CC_STATE_POINTERS
1136 *
1137 * The pointer offsets are relative to
1138 * CMD_STATE_BASE_ADDRESS.DynamicStateBaseAddress.
1139 *
1140 * The HiZ op doesn't use BLEND_STATE or COLOR_CALC_STATE.
1141 *
1142 * The dynamic state emit helpers emit their own STATE_POINTERS packets on
    * gen7+. However, on gen6 and earlier, they're all lumped together in
1144 * one CC_STATE_POINTERS packet so we have to emit that here.
1145 */
1146 blorp_emit(batch, GENX(3DSTATE_CC_STATE_POINTERS), cc) {
1147 cc.BLEND_STATEChange = true;
1148 cc.ColorCalcStatePointerValid = true;
1149 cc.DEPTH_STENCIL_STATEChange = true;
1150 cc.PointertoBLEND_STATE = blend_state_offset;
1151 cc.ColorCalcStatePointer = color_calc_state_offset;
1152 cc.PointertoDEPTH_STENCIL_STATE = depth_stencil_state_offset;
1153 }
1154 #else
1155 (void)blend_state_offset;
1156 (void)color_calc_state_offset;
1157 (void)depth_stencil_state_offset;
1158 #endif
1159
1160 blorp_emit(batch, GENX(3DSTATE_CONSTANT_VS), vs);
1161 #if GEN_GEN >= 7
1162 blorp_emit(batch, GENX(3DSTATE_CONSTANT_HS), hs);
1163 blorp_emit(batch, GENX(3DSTATE_CONSTANT_DS), DS);
1164 #endif
1165 blorp_emit(batch, GENX(3DSTATE_CONSTANT_GS), gs);
1166 blorp_emit(batch, GENX(3DSTATE_CONSTANT_PS), ps);
1167
1168 if (params->src.enabled)
1169 blorp_emit_sampler_state(batch, params);
1170
1171 blorp_emit_3dstate_multisample(batch, params);
1172
1173 blorp_emit(batch, GENX(3DSTATE_SAMPLE_MASK), mask) {
1174 mask.SampleMask = (1 << params->num_samples) - 1;
1175 }
1176
1177 /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
1178 * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
1179 *
1180 * [DevSNB] A pipeline flush must be programmed prior to a
1181 * 3DSTATE_VS command that causes the VS Function Enable to
1182 * toggle. Pipeline flush can be executed by sending a PIPE_CONTROL
1183 * command with CS stall bit set and a post sync operation.
1184 *
1185 * We've already done one at the start of the BLORP operation.
1186 */
1187 blorp_emit_vs_config(batch, params);
1188 #if GEN_GEN >= 7
1189 blorp_emit(batch, GENX(3DSTATE_HS), hs);
1190 blorp_emit(batch, GENX(3DSTATE_TE), te);
1191 blorp_emit(batch, GENX(3DSTATE_DS), DS);
1192 blorp_emit(batch, GENX(3DSTATE_STREAMOUT), so);
1193 #endif
1194 blorp_emit(batch, GENX(3DSTATE_GS), gs);
1195
1196 blorp_emit(batch, GENX(3DSTATE_CLIP), clip) {
1197 clip.PerspectiveDivideDisable = true;
1198 }
1199
1200 blorp_emit_sf_config(batch, params);
1201 blorp_emit_ps_config(batch, params);
1202
1203 blorp_emit_cc_viewport(batch, params);
1204 }
1205
1206 /******** This is the end of the pipeline setup code ********/
1207
1208 #endif /* GEN_GEN >= 6 */
1209
1210 #if GEN_GEN >= 7 && GEN_GEN <= 10
1211 static void
blorp_emit_memcpy(struct blorp_batch *batch,
                  struct blorp_address dst,
                  struct blorp_address src,
                  uint32_t size)
1216 {
1217 assert(size % 4 == 0);
1218
1219 for (unsigned dw = 0; dw < size; dw += 4) {
1220 #if GEN_GEN >= 8
1221 blorp_emit(batch, GENX(MI_COPY_MEM_MEM), cp) {
1222 cp.DestinationMemoryAddress = dst;
1223 cp.SourceMemoryAddress = src;
1224 }
1225 #else
1226 /* IVB does not have a general purpose register for command streamer
1227 * commands. Therefore, we use an alternate temporary register.
1228 */
1229 #define BLORP_TEMP_REG 0x2440 /* GEN7_3DPRIM_BASE_VERTEX */
1230 blorp_emit(batch, GENX(MI_LOAD_REGISTER_MEM), load) {
1231 load.RegisterAddress = BLORP_TEMP_REG;
1232 load.MemoryAddress = src;
1233 }
1234 blorp_emit(batch, GENX(MI_STORE_REGISTER_MEM), store) {
1235 store.RegisterAddress = BLORP_TEMP_REG;
1236 store.MemoryAddress = dst;
1237 }
1238 #undef BLORP_TEMP_REG
1239 #endif
1240 dst.offset += 4;
1241 src.offset += 4;
1242 }
1243 }
1244 #endif
1245
1246 static void
blorp_emit_surface_state(struct blorp_batch *batch,
                         const struct brw_blorp_surface_info *surface,
                         void *state, uint32_t state_offset,
                         const bool color_write_disables[4],
                         bool is_render_target)
1252 {
1253 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1254 struct isl_surf surf = surface->surf;
1255
1256 if (surf.dim == ISL_SURF_DIM_1D &&
1257 surf.dim_layout == ISL_DIM_LAYOUT_GEN4_2D) {
1258 assert(surf.logical_level0_px.height == 1);
1259 surf.dim = ISL_SURF_DIM_2D;
1260 }
1261
1262 /* Blorp doesn't support HiZ in any of the blit or slow-clear paths */
1263 enum isl_aux_usage aux_usage = surface->aux_usage;
1264 if (aux_usage == ISL_AUX_USAGE_HIZ)
1265 aux_usage = ISL_AUX_USAGE_NONE;
1266
1267 isl_channel_mask_t write_disable_mask = 0;
1268 if (is_render_target && GEN_GEN <= 5) {
1269 if (color_write_disables[0])
1270 write_disable_mask |= ISL_CHANNEL_RED_BIT;
1271 if (color_write_disables[1])
1272 write_disable_mask |= ISL_CHANNEL_GREEN_BIT;
1273 if (color_write_disables[2])
1274 write_disable_mask |= ISL_CHANNEL_BLUE_BIT;
1275 if (color_write_disables[3])
1276 write_disable_mask |= ISL_CHANNEL_ALPHA_BIT;
1277 }
1278
1279 isl_surf_fill_state(batch->blorp->isl_dev, state,
1280 .surf = &surf, .view = &surface->view,
1281 .aux_surf = &surface->aux_surf, .aux_usage = aux_usage,
1282 .mocs = surface->addr.mocs,
1283 .clear_color = surface->clear_color,
1284 .write_disables = write_disable_mask);
1285
1286 blorp_surface_reloc(batch, state_offset + isl_dev->ss.addr_offset,
1287 surface->addr, 0);
1288
1289 if (aux_usage != ISL_AUX_USAGE_NONE) {
1290 /* On gen7 and prior, the bottom 12 bits of the MCS base address are
1291 * used to store other information. This should be ok, however, because
       * surface buffer addresses are always 4K page aligned.
1293 */
1294 assert((surface->aux_addr.offset & 0xfff) == 0);
1295 uint32_t *aux_addr = state + isl_dev->ss.aux_addr_offset;
1296 blorp_surface_reloc(batch, state_offset + isl_dev->ss.aux_addr_offset,
1297 surface->aux_addr, *aux_addr);
1298 }
1299
1300 blorp_flush_range(batch, state, GENX(RENDER_SURFACE_STATE_length) * 4);
1301
1302 if (surface->clear_color_addr.buffer) {
1303 #if GEN_GEN > 10
1304 unreachable("Implement indirect clear support on gen11+");
1305 #elif GEN_GEN >= 7 && GEN_GEN <= 10
1306 struct blorp_address dst_addr = blorp_get_surface_base_address(batch);
1307 dst_addr.offset += state_offset + isl_dev->ss.clear_value_offset;
1308 blorp_emit_memcpy(batch, dst_addr, surface->clear_color_addr,
1309 isl_dev->ss.clear_value_size);
1310 #else
1311 unreachable("Fast clears are only supported on gen7+");
1312 #endif
1313 }
1314 }
1315
1316 static void
blorp_emit_null_surface_state(struct blorp_batch *batch,
                              const struct brw_blorp_surface_info *surface,
                              uint32_t *state)
1320 {
1321 struct GENX(RENDER_SURFACE_STATE) ss = {
1322 .SurfaceType = SURFTYPE_NULL,
1323 .SurfaceFormat = (enum GENX(SURFACE_FORMAT)) ISL_FORMAT_R8G8B8A8_UNORM,
1324 .Width = surface->surf.logical_level0_px.width - 1,
1325 .Height = surface->surf.logical_level0_px.height - 1,
1326 .MIPCountLOD = surface->view.base_level,
1327 .MinimumArrayElement = surface->view.base_array_layer,
1328 .Depth = surface->view.array_len - 1,
1329 .RenderTargetViewExtent = surface->view.array_len - 1,
1330 #if GEN_GEN >= 6
1331 .NumberofMultisamples = ffs(surface->surf.samples) - 1,
1332 #endif
1333
1334 #if GEN_GEN >= 7
1335 .SurfaceArray = surface->surf.dim != ISL_SURF_DIM_3D,
1336 #endif
1337
1338 #if GEN_GEN >= 8
1339 .TileMode = YMAJOR,
1340 #else
1341 .TiledSurface = true,
1342 #endif
1343 };
1344
1345 GENX(RENDER_SURFACE_STATE_pack)(NULL, state, &ss);
1346
1347 blorp_flush_range(batch, state, GENX(RENDER_SURFACE_STATE_length) * 4);
1348 }
1349
1350 static void
blorp_emit_surface_states(struct blorp_batch *batch,
                          const struct blorp_params *params)
1353 {
1354 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1355 uint32_t bind_offset, surface_offsets[2];
1356 void *surface_maps[2];
1357
1358 MAYBE_UNUSED bool has_indirect_clear_color = false;
1359 if (params->use_pre_baked_binding_table) {
1360 bind_offset = params->pre_baked_binding_table_offset;
1361 } else {
1362 unsigned num_surfaces = 1 + params->src.enabled;
1363 blorp_alloc_binding_table(batch, num_surfaces,
1364 isl_dev->ss.size, isl_dev->ss.align,
1365 &bind_offset, surface_offsets, surface_maps);
1366
1367 if (params->dst.enabled) {
1368 blorp_emit_surface_state(batch, ¶ms->dst,
1369 surface_maps[BLORP_RENDERBUFFER_BT_INDEX],
1370 surface_offsets[BLORP_RENDERBUFFER_BT_INDEX],
1371 params->color_write_disable, true);
1372 if (params->dst.clear_color_addr.buffer != NULL)
1373 has_indirect_clear_color = true;
1374 } else {
1375 assert(params->depth.enabled || params->stencil.enabled);
1376 const struct brw_blorp_surface_info *surface =
1377 params->depth.enabled ? ¶ms->depth : ¶ms->stencil;
1378 blorp_emit_null_surface_state(batch, surface,
1379 surface_maps[BLORP_RENDERBUFFER_BT_INDEX]);
1380 }
1381
1382 if (params->src.enabled) {
1383 blorp_emit_surface_state(batch, ¶ms->src,
1384 surface_maps[BLORP_TEXTURE_BT_INDEX],
1385 surface_offsets[BLORP_TEXTURE_BT_INDEX],
1386 NULL, false);
1387 if (params->src.clear_color_addr.buffer != NULL)
1388 has_indirect_clear_color = true;
1389 }
1390 }
1391
1392 #if GEN_GEN >= 7 && GEN_GEN <= 10
1393 if (has_indirect_clear_color) {
1394 /* Updating a surface state object may require that the state cache be
1395 * invalidated. From the SKL PRM, Shared Functions -> State -> State
1396 * Caching:
1397 *
1398 * Whenever the RENDER_SURFACE_STATE object in memory pointed to by
1399 * the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
1400 * modified [...], the L1 state cache must be invalidated to ensure
1401 * the new surface or sampler state is fetched from system memory.
1402 */
1403 blorp_emit(batch, GENX(PIPE_CONTROL), pipe) {
1404 pipe.StateCacheInvalidationEnable = true;
1405 }
1406 }
1407 #endif
1408
1409 #if GEN_GEN >= 7
1410 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), bt);
1411 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_HS), bt);
1412 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_DS), bt);
1413 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_GS), bt);
1414
1415 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_PS), bt) {
1416 bt.PointertoPSBindingTable = bind_offset;
1417 }
1418 #elif GEN_GEN >= 6
1419 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS), bt) {
1420 bt.PSBindingTableChange = true;
1421 bt.PointertoPSBindingTable = bind_offset;
1422 }
1423 #else
1424 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS), bt) {
1425 bt.PointertoPSBindingTable = bind_offset;
1426 }
1427 #endif
1428 }
1429
1430 static void
blorp_emit_depth_stencil_config(struct blorp_batch *batch,
                                const struct blorp_params *params)
1433 {
1434 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1435
1436 uint32_t *dw = blorp_emit_dwords(batch, isl_dev->ds.size / 4);
1437 if (dw == NULL)
1438 return;
1439
1440 struct isl_depth_stencil_hiz_emit_info info = { };
1441
1442 if (params->depth.enabled) {
1443 info.view = ¶ms->depth.view;
1444 info.mocs = params->depth.addr.mocs;
1445 } else if (params->stencil.enabled) {
1446 info.view = ¶ms->stencil.view;
1447 info.mocs = params->stencil.addr.mocs;
1448 }
1449
1450 if (params->depth.enabled) {
1451 info.depth_surf = ¶ms->depth.surf;
1452
1453 info.depth_address =
1454 blorp_emit_reloc(batch, dw + isl_dev->ds.depth_offset / 4,
1455 params->depth.addr, 0);
1456
1457 info.hiz_usage = params->depth.aux_usage;
1458 if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
1459 info.hiz_surf = ¶ms->depth.aux_surf;
1460
1461 struct blorp_address hiz_address = params->depth.aux_addr;
1462 #if GEN_GEN == 6
1463 /* Sandy bridge hardware does not technically support mipmapped HiZ.
1464 * However, we have a special layout that allows us to make it work
1465 * anyway by manually offsetting to the specified miplevel.
1466 */
1467 assert(info.hiz_surf->dim_layout == ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ);
1468 uint32_t offset_B;
1469 isl_surf_get_image_offset_B_tile_sa(info.hiz_surf,
1470 info.view->base_level, 0, 0,
1471 &offset_B, NULL, NULL);
1472 hiz_address.offset += offset_B;
1473 #endif
1474
1475 info.hiz_address =
1476 blorp_emit_reloc(batch, dw + isl_dev->ds.hiz_offset / 4,
1477 hiz_address, 0);
1478
1479 info.depth_clear_value = params->depth.clear_color.f32[0];
1480 }
1481 }
1482
1483 if (params->stencil.enabled) {
1484 info.stencil_surf = ¶ms->stencil.surf;
1485
1486 struct blorp_address stencil_address = params->stencil.addr;
1487 #if GEN_GEN == 6
1488 /* Sandy bridge hardware does not technically support mipmapped stencil.
1489 * However, we have a special layout that allows us to make it work
1490 * anyway by manually offsetting to the specified miplevel.
1491 */
1492 assert(info.stencil_surf->dim_layout == ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ);
1493 uint32_t offset_B;
1494 isl_surf_get_image_offset_B_tile_sa(info.stencil_surf,
1495 info.view->base_level, 0, 0,
1496 &offset_B, NULL, NULL);
1497 stencil_address.offset += offset_B;
1498 #endif
1499
1500 info.stencil_address =
1501 blorp_emit_reloc(batch, dw + isl_dev->ds.stencil_offset / 4,
1502 stencil_address, 0);
1503 }
1504
1505 isl_emit_depth_stencil_hiz_s(isl_dev, dw, &info);
1506 }
1507
1508 #if GEN_GEN >= 8
1509 /* Emits the Optimized HiZ sequence specified in the BDW+ PRMs. The
1510 * depth/stencil buffer extents are ignored to handle APIs which perform
1511 * clearing operations without such information.
 */
1513 static void
blorp_emit_gen8_hiz_op(struct blorp_batch *batch,
                       const struct blorp_params *params)
1516 {
1517 /* We should be performing an operation on a depth or stencil buffer.
1518 */
1519 assert(params->depth.enabled || params->stencil.enabled);
1520
1521 /* The stencil buffer should only be enabled if a fast clear operation is
1522 * requested.
1523 */
1524 if (params->stencil.enabled)
1525 assert(params->hiz_op == BLORP_HIZ_OP_DEPTH_CLEAR);
1526
1527 /* From the BDW PRM Volume 2, 3DSTATE_WM_HZ_OP:
1528 *
1529 * 3DSTATE_MULTISAMPLE packet must be used prior to this packet to change
1530 * the Number of Multisamples. This packet must not be used to change
1531 * Number of Multisamples in a rendering sequence.
1532 *
1533 * Since HIZ may be the first thing in a batch buffer, play safe and always
1534 * emit 3DSTATE_MULTISAMPLE.
1535 */
1536 blorp_emit_3dstate_multisample(batch, params);
1537
1538 /* If we can't alter the depth stencil config and multiple layers are
1539 * involved, the HiZ op will fail. This is because the op requires that a
1540 * new config is emitted for each additional layer.
1541 */
1542 if (batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL) {
1543 assert(params->num_layers <= 1);
1544 } else {
1545 blorp_emit_depth_stencil_config(batch, params);
1546 }
1547
1548 blorp_emit(batch, GENX(3DSTATE_WM_HZ_OP), hzp) {
1549 switch (params->hiz_op) {
1550 case BLORP_HIZ_OP_DEPTH_CLEAR:
1551 hzp.StencilBufferClearEnable = params->stencil.enabled;
1552 hzp.DepthBufferClearEnable = params->depth.enabled;
1553 hzp.StencilClearValue = params->stencil_ref;
1554 hzp.FullSurfaceDepthandStencilClear = params->full_surface_hiz_op;
1555 break;
1556 case BLORP_HIZ_OP_DEPTH_RESOLVE:
1557 assert(params->full_surface_hiz_op);
1558 hzp.DepthBufferResolveEnable = true;
1559 break;
1560 case BLORP_HIZ_OP_HIZ_RESOLVE:
1561 assert(params->full_surface_hiz_op);
1562 hzp.HierarchicalDepthBufferResolveEnable = true;
1563 break;
1564 case BLORP_HIZ_OP_NONE:
1565 unreachable("Invalid HIZ op");
1566 }
1567
1568 hzp.NumberofMultisamples = ffs(params->num_samples) - 1;
1569 hzp.SampleMask = 0xFFFF;
1570
1571 /* Due to a hardware issue, this bit MBZ */
1572 assert(hzp.ScissorRectangleEnable == false);
1573
1574 /* Contrary to the HW docs both fields are inclusive */
1575 hzp.ClearRectangleXMin = params->x0;
1576 hzp.ClearRectangleYMin = params->y0;
1577
1578 /* Contrary to the HW docs both fields are exclusive */
1579 hzp.ClearRectangleXMax = params->x1;
1580 hzp.ClearRectangleYMax = params->y1;
1581 }
1582
   /* Emit a PIPE_CONTROL with all bits clear except for “Post-Sync
    * Operation”, which must be set to “Write Immediate Data”.
1585 */
1586 blorp_emit(batch, GENX(PIPE_CONTROL), pc) {
1587 pc.PostSyncOperation = WriteImmediateData;
1588 pc.Address = blorp_get_workaround_page(batch);
1589 }
1590
1591 blorp_emit(batch, GENX(3DSTATE_WM_HZ_OP), hzp);
1592 }
1593 #endif
1594
1595 /**
1596 * \brief Execute a blit or render pass operation.
1597 *
1598 * To execute the operation, this function manually constructs and emits a
1599 * batch to draw a rectangle primitive. The batchbuffer is flushed before
1600 * constructing and after emitting the batch.
1601 *
1602 * This function alters no GL state.
1603 */
1604 static void
blorp_exec(struct blorp_batch *batch, const struct blorp_params *params)
1606 {
1607 #if GEN_GEN >= 8
1608 if (params->hiz_op != BLORP_HIZ_OP_NONE) {
1609 blorp_emit_gen8_hiz_op(batch, params);
1610 return;
1611 }
1612 #endif
1613
1614 blorp_emit_vertex_buffers(batch, params);
1615 blorp_emit_vertex_elements(batch, params);
1616
1617 blorp_emit_pipeline(batch, params);
1618
1619 blorp_emit_surface_states(batch, params);
1620
1621 if (!(batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL))
1622 blorp_emit_depth_stencil_config(batch, params);
1623
1624 blorp_emit(batch, GENX(3DPRIMITIVE), prim) {
1625 prim.VertexAccessType = SEQUENTIAL;
1626 prim.PrimitiveTopologyType = _3DPRIM_RECTLIST;
1627 #if GEN_GEN >= 7
1628 prim.PredicateEnable = batch->flags & BLORP_BATCH_PREDICATE_ENABLE;
1629 #endif
1630 prim.VertexCountPerInstance = 3;
1631 prim.InstanceCount = params->num_layers;
1632 }
1633 }
1634
1635 #endif /* BLORP_GENX_EXEC_H */
1636