1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #ifndef IRIS_CONTEXT_H
24 #define IRIS_CONTEXT_H
25
26 #include "pipe/p_context.h"
27 #include "pipe/p_state.h"
28 #include "util/perf/u_trace.h"
29 #include "util/set.h"
30 #include "util/slab.h"
31 #include "util/u_debug.h"
32 #include "util/macros.h"
33 #include "util/u_threaded_context.h"
34 #include "intel/blorp/blorp.h"
35 #include "intel/dev/intel_debug.h"
36 #include "intel/common/intel_l3_config.h"
37 #include "intel/compiler/intel_shader_enums.h"
38 #include "intel/ds/intel_driver_ds.h"
39 #include "iris_batch.h"
40 #include "iris_binder.h"
41 #include "iris_fence.h"
42 #include "iris_resource.h"
43 #include "iris_screen.h"
44
45 struct iris_bo;
46 struct iris_context;
47 struct blorp_batch;
48 struct blorp_params;
49
50 #define IRIS_MAX_DRAW_BUFFERS 8
51 #define IRIS_MAX_SOL_BINDINGS 64
52
53 #define IRIS_MAX_TEXTURE_BUFFER_SIZE (1 << 27)
54 /* IRIS_MAX_ABOS and IRIS_MAX_SSBOS must be the same. */
55 #define IRIS_MAX_ABOS 16
56 #define IRIS_MAX_SSBOS 16
57 #define IRIS_MAX_VIEWPORTS 16
58 #define IRIS_MAX_CLIP_PLANES 8
59 #define IRIS_MAX_GLOBAL_BINDINGS 128
60
/* Values for the "bo_reuse" driconf option, selecting whether freed
 * buffer objects may be recycled from the BO cache. */
enum {
   DRI_CONF_BO_REUSE_DISABLED,
   DRI_CONF_BO_REUSE_ALL
};
65
enum iris_param_domain {
   ELK_PARAM_DOMAIN_BUILTIN = 0,
   ELK_PARAM_DOMAIN_IMAGE,
};

/* ELK "params" are 32-bit handles: the domain lives in the top 8 bits and
 * a domain-specific value in the low 24 bits.  For the IMAGE domain, the
 * value packs an image index (value bits 23:8) and an offset (bits 7:0).
 */
#define ELK_PARAM(domain, val)   (ELK_PARAM_DOMAIN_##domain << 24 | (val))
#define ELK_PARAM_DOMAIN(param)  ((uint32_t)(param) >> 24)
#define ELK_PARAM_VALUE(param)   ((uint32_t)(param) & 0x00ffffff)
#define ELK_PARAM_IMAGE(idx, offset) ELK_PARAM(IMAGE, ((idx) << 8) | (offset))
#define ELK_PARAM_IMAGE_IDX(value)   (ELK_PARAM_VALUE(value) >> 8)
/* Extract the full 8-bit offset field packed by ELK_PARAM_IMAGE.  (The
 * previous 0xf mask covered only 4 bits and would silently truncate any
 * offset >= 16.) */
#define ELK_PARAM_IMAGE_OFFSET(value) (ELK_PARAM_VALUE(value) & 0xff)
77
78 /**
79 * Dirty flags. When state changes, we flag some combination of these
80 * to indicate that particular GPU commands need to be re-emitted.
81 *
82 * Each bit typically corresponds to a single 3DSTATE_* command packet, but
83 * in rare cases they map to a group of related packets that need to be
84 * emitted together.
85 *
86 * See iris_upload_render_state().
87 */
88 #define IRIS_DIRTY_COLOR_CALC_STATE (1ull << 0)
89 #define IRIS_DIRTY_POLYGON_STIPPLE (1ull << 1)
90 #define IRIS_DIRTY_SCISSOR_RECT (1ull << 2)
91 #define IRIS_DIRTY_WM_DEPTH_STENCIL (1ull << 3)
92 #define IRIS_DIRTY_CC_VIEWPORT (1ull << 4)
93 #define IRIS_DIRTY_SF_CL_VIEWPORT (1ull << 5)
94 #define IRIS_DIRTY_PS_BLEND (1ull << 6)
95 #define IRIS_DIRTY_BLEND_STATE (1ull << 7)
96 #define IRIS_DIRTY_RASTER (1ull << 8)
97 #define IRIS_DIRTY_CLIP (1ull << 9)
98 #define IRIS_DIRTY_SBE (1ull << 10)
99 #define IRIS_DIRTY_LINE_STIPPLE (1ull << 11)
100 #define IRIS_DIRTY_VERTEX_ELEMENTS (1ull << 12)
101 #define IRIS_DIRTY_MULTISAMPLE (1ull << 13)
102 #define IRIS_DIRTY_VERTEX_BUFFERS (1ull << 14)
103 #define IRIS_DIRTY_SAMPLE_MASK (1ull << 15)
104 #define IRIS_DIRTY_URB (1ull << 16)
105 #define IRIS_DIRTY_DEPTH_BUFFER (1ull << 17)
106 #define IRIS_DIRTY_WM (1ull << 18)
107 #define IRIS_DIRTY_SO_BUFFERS (1ull << 19)
108 #define IRIS_DIRTY_SO_DECL_LIST (1ull << 20)
109 #define IRIS_DIRTY_STREAMOUT (1ull << 21)
110 #define IRIS_DIRTY_VF_SGVS (1ull << 22)
111 #define IRIS_DIRTY_VF (1ull << 23)
112 #define IRIS_DIRTY_VF_TOPOLOGY (1ull << 24)
113 #define IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES (1ull << 25)
114 #define IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES (1ull << 26)
115 #define IRIS_DIRTY_VF_STATISTICS (1ull << 27)
116 #define IRIS_DIRTY_PMA_FIX (1ull << 28)
117 #define IRIS_DIRTY_DEPTH_BOUNDS (1ull << 29)
118 #define IRIS_DIRTY_RENDER_BUFFER (1ull << 30)
119 #define IRIS_DIRTY_STENCIL_REF (1ull << 31)
120 #define IRIS_DIRTY_VERTEX_BUFFER_FLUSHES (1ull << 32)
121 #define IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES (1ull << 33)
122 #define IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES (1ull << 34)
123 #define IRIS_DIRTY_VFG (1ull << 35)
124 #define IRIS_DIRTY_DS_WRITE_ENABLE (1ull << 36)
125
126 #define IRIS_ALL_DIRTY_FOR_COMPUTE (IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES | \
127 IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES)
128
129 #define IRIS_ALL_DIRTY_FOR_RENDER (~IRIS_ALL_DIRTY_FOR_COMPUTE)
130
131 /**
132 * Per-stage dirty flags. When state changes, we flag some combination of
133 * these to indicate that particular GPU commands need to be re-emitted.
134 * Unlike the IRIS_DIRTY_* flags these are shader stage-specific and can be
135 * indexed by shifting the mask by the shader stage index.
136 *
137 * See iris_upload_render_state().
138 */
139 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_VS (1ull << 0)
140 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS (1ull << 1)
141 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_TES (1ull << 2)
142 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_GS (1ull << 3)
143 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_PS (1ull << 4)
144 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_CS (1ull << 5)
145 #define IRIS_STAGE_DIRTY_UNCOMPILED_VS (1ull << 6)
146 #define IRIS_STAGE_DIRTY_UNCOMPILED_TCS (1ull << 7)
147 #define IRIS_STAGE_DIRTY_UNCOMPILED_TES (1ull << 8)
148 #define IRIS_STAGE_DIRTY_UNCOMPILED_GS (1ull << 9)
149 #define IRIS_STAGE_DIRTY_UNCOMPILED_FS (1ull << 10)
150 #define IRIS_STAGE_DIRTY_UNCOMPILED_CS (1ull << 11)
151 #define IRIS_STAGE_DIRTY_VS (1ull << 12)
152 #define IRIS_STAGE_DIRTY_TCS (1ull << 13)
153 #define IRIS_STAGE_DIRTY_TES (1ull << 14)
154 #define IRIS_STAGE_DIRTY_GS (1ull << 15)
155 #define IRIS_STAGE_DIRTY_FS (1ull << 16)
156 #define IRIS_STAGE_DIRTY_CS (1ull << 17)
157 #define IRIS_SHIFT_FOR_STAGE_DIRTY_CONSTANTS 18
158 #define IRIS_STAGE_DIRTY_CONSTANTS_VS (1ull << 18)
159 #define IRIS_STAGE_DIRTY_CONSTANTS_TCS (1ull << 19)
160 #define IRIS_STAGE_DIRTY_CONSTANTS_TES (1ull << 20)
161 #define IRIS_STAGE_DIRTY_CONSTANTS_GS (1ull << 21)
162 #define IRIS_STAGE_DIRTY_CONSTANTS_FS (1ull << 22)
163 #define IRIS_STAGE_DIRTY_CONSTANTS_CS (1ull << 23)
164 #define IRIS_SHIFT_FOR_STAGE_DIRTY_BINDINGS 24
165 #define IRIS_STAGE_DIRTY_BINDINGS_VS (1ull << 24)
166 #define IRIS_STAGE_DIRTY_BINDINGS_TCS (1ull << 25)
167 #define IRIS_STAGE_DIRTY_BINDINGS_TES (1ull << 26)
168 #define IRIS_STAGE_DIRTY_BINDINGS_GS (1ull << 27)
169 #define IRIS_STAGE_DIRTY_BINDINGS_FS (1ull << 28)
170 #define IRIS_STAGE_DIRTY_BINDINGS_CS (1ull << 29)
171
172 #define IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE (IRIS_STAGE_DIRTY_CS | \
173 IRIS_STAGE_DIRTY_SAMPLER_STATES_CS | \
174 IRIS_STAGE_DIRTY_UNCOMPILED_CS | \
175 IRIS_STAGE_DIRTY_CONSTANTS_CS | \
176 IRIS_STAGE_DIRTY_BINDINGS_CS)
177
178 #define IRIS_ALL_STAGE_DIRTY_FOR_RENDER (~IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE)
179
180 #define IRIS_ALL_STAGE_DIRTY_BINDINGS_FOR_RENDER (IRIS_STAGE_DIRTY_BINDINGS_VS | \
181 IRIS_STAGE_DIRTY_BINDINGS_TCS | \
182 IRIS_STAGE_DIRTY_BINDINGS_TES | \
183 IRIS_STAGE_DIRTY_BINDINGS_GS | \
184 IRIS_STAGE_DIRTY_BINDINGS_FS)
185
186 #define IRIS_ALL_STAGE_DIRTY_BINDINGS (IRIS_ALL_STAGE_DIRTY_BINDINGS_FOR_RENDER | \
187 IRIS_STAGE_DIRTY_BINDINGS_CS)
188
189 /**
190 * Non-orthogonal state (NOS) dependency flags.
191 *
192 * Shader programs may depend on non-orthogonal state. These flags are
193 * used to indicate that a shader's key depends on the state provided by
194 * a certain Gallium CSO. Changing any CSOs marked as a dependency will
195 * cause the driver to re-compute the shader key, possibly triggering a
196 * shader recompile.
197 */
198 enum iris_nos_dep {
199 IRIS_NOS_FRAMEBUFFER,
200 IRIS_NOS_DEPTH_STENCIL_ALPHA,
201 IRIS_NOS_RASTERIZER,
202 IRIS_NOS_BLEND,
203 IRIS_NOS_LAST_VUE_MAP,
204
205 IRIS_NOS_COUNT,
206 };
207
208 /** @{
209 *
210 * Program cache keys for state based recompiles.
211 */
212
213 /* Provide explicit padding for each member, to ensure that the compiler
214 * initializes every bit in the shader cache keys. The keys will be compared
215 * with memcmp.
216 */
217 PRAGMA_DIAGNOSTIC_PUSH
218 PRAGMA_DIAGNOSTIC_ERROR(-Wpadded)
219
220 /**
221 * Note, we need to take care to have padding explicitly declared
222 * for key since we will directly memcmp the whole struct.
223 */
224 struct iris_base_prog_key {
225 unsigned program_string_id;
226 bool limit_trig_input_range;
227 unsigned padding:24;
228 };
229
230 struct iris_vue_prog_key {
231 struct iris_base_prog_key base;
232
233 unsigned nr_userclip_plane_consts:4;
234 unsigned padding:28;
235 };
236
237 struct iris_vs_prog_key {
238 struct iris_vue_prog_key vue;
239 };
240
241 struct iris_tcs_prog_key {
242 struct iris_vue_prog_key vue;
243
244 enum tess_primitive_mode _tes_primitive_mode;
245
246 uint8_t input_vertices;
247
248 bool quads_workaround;
249 unsigned padding:16;
250
251 /** A bitfield of per-patch outputs written. */
252 uint32_t patch_outputs_written;
253
254 /** A bitfield of per-vertex outputs written. */
255 uint64_t outputs_written;
256 };
257
258 struct iris_tes_prog_key {
259 struct iris_vue_prog_key vue;
260
261 /** A bitfield of per-patch inputs read. */
262 uint32_t patch_inputs_read;
263
264 /** A bitfield of per-vertex inputs read. */
265 uint64_t inputs_read;
266 };
267
268 struct iris_gs_prog_key {
269 struct iris_vue_prog_key vue;
270 };
271
272 struct iris_fs_prog_key {
273 struct iris_base_prog_key base;
274
275 uint64_t input_slots_valid;
276 uint8_t color_outputs_valid;
277
278 unsigned nr_color_regions:5;
279 bool flat_shade:1;
280 bool alpha_test_replicate_alpha:1;
281 bool alpha_to_coverage:1;
282 bool clamp_fragment_color:1;
283 bool persample_interp:1;
284 bool multisample_fbo:1;
285 bool force_dual_color_blend:1;
286 bool coherent_fb_fetch:1;
287 uint64_t padding:43;
288 };
289
290 struct iris_cs_prog_key {
291 struct iris_base_prog_key base;
292 };
293
294 union iris_any_prog_key {
295 struct iris_base_prog_key base;
296 struct iris_vue_prog_key vue;
297 struct iris_vs_prog_key vs;
298 struct iris_tcs_prog_key tcs;
299 struct iris_tes_prog_key tes;
300 struct iris_gs_prog_key gs;
301 struct iris_fs_prog_key fs;
302 struct iris_cs_prog_key cs;
303 };
304
305 /* Restore the pack alignment to default. */
306 PRAGMA_DIAGNOSTIC_POP
307
308 /** @} */
309
/**
 * A range of a uniform buffer to be pushed as constants.
 *
 * Mirrors the compiler's UBO range analysis; see the ubo_ranges field of
 * iris_compiled_shader, which is derived from prog_data.
 */
struct iris_ubo_range
{
   /* Index of the uniform buffer this range reads from. */
   uint16_t block;

   /* In units of 32-byte registers */
   uint8_t start;
   uint8_t length;
};
318
/**
 * Fragment-shader-specific data derived from the compiled program,
 * stored in the stage union of iris_compiled_shader.
 */
struct iris_fs_data {
   /* Mapping from varying slots to the shader's input setup (SBE). */
   int urb_setup[VARYING_SLOT_MAX];
   uint8_t urb_setup_attribs[VARYING_SLOT_MAX];
   uint8_t urb_setup_attribs_count;

   /* Bitfield of varying inputs read by the shader. */
   uint64_t inputs;
   unsigned num_varying_inputs;

   unsigned msaa_flags_param;
   /* Bitfield of inputs that use flat interpolation. */
   uint32_t flat_inputs;

   uint8_t computed_depth_mode;
   uint8_t max_polygons;
   uint8_t dispatch_multi;

   /* Flags describing which SIMD variants exist and which features the
    * shader uses; consumed by 3DSTATE_PS/3DSTATE_WM setup. */
   bool computed_stencil;
   bool early_fragment_tests;
   bool post_depth_coverage;
   bool inner_coverage;
   bool dispatch_8;
   bool dispatch_16;
   bool dispatch_32;
   bool dual_src_blend;
   bool uses_pos_offset;
   bool uses_omask;
   bool uses_kill;
   bool uses_src_depth;
   bool uses_src_w;
   bool uses_sample_mask;
   bool uses_vmask;
   bool has_side_effects;
   bool pulls_bary;

   bool uses_sample_offsets;
   bool uses_npc_bary_coefficients;
   bool uses_pc_bary_coefficients;
   bool uses_depth_w_coefficients;

   bool uses_nonperspective_interp_modes;

   bool is_per_sample;
};
361
362 struct iris_push_const_block {
363 unsigned dwords; /* Dword count, not reg aligned */
364 unsigned regs;
365 unsigned size; /* Bytes, register aligned */
366 };
367
368 struct iris_cs_data {
369 struct {
370 struct iris_push_const_block cross_thread;
371 struct iris_push_const_block per_thread;
372 } push;
373
374 unsigned local_size[3];
375 unsigned prog_offset[3];
376 unsigned prog_mask;
377
378 uint8_t generate_local_id;
379 enum intel_compute_walk_order walk_order;
380
381 bool uses_barrier;
382 bool uses_sampler;
383 bool first_param_is_builtin_subgroup_id;
384 };
385
386 static inline uint32_t
iris_cs_data_prog_offset(const struct iris_cs_data * prog_data,unsigned dispatch_width)387 iris_cs_data_prog_offset(const struct iris_cs_data *prog_data,
388 unsigned dispatch_width)
389 {
390 assert(dispatch_width == 8 ||
391 dispatch_width == 16 ||
392 dispatch_width == 32);
393 const unsigned index = dispatch_width / 16;
394 assert(prog_data->prog_mask & (1 << index));
395 return prog_data->prog_offset[index];
396 }
397
398 struct iris_vue_data {
399 struct intel_vue_map vue_map;
400 unsigned urb_read_length;
401 uint32_t cull_distance_mask;
402 unsigned urb_entry_size;
403 enum intel_shader_dispatch_mode dispatch_mode;
404 bool include_vue_handles;
405 };
406
407 struct iris_vs_data {
408 struct iris_vue_data base;
409
410 bool uses_vertexid;
411 bool uses_instanceid;
412 bool uses_firstvertex;
413 bool uses_baseinstance;
414 bool uses_drawid;
415 };
416
417 struct iris_tcs_data {
418 struct iris_vue_data base;
419
420 int instances;
421 int patch_count_threshold;
422 bool include_primitive_id;
423 };
424
425 struct iris_tes_data {
426 struct iris_vue_data base;
427
428 enum intel_tess_partitioning partitioning;
429 enum intel_tess_output_topology output_topology;
430 enum intel_tess_domain domain;
431 bool include_primitive_id;
432 };
433
434 struct iris_gs_data {
435 struct iris_vue_data base;
436
437 unsigned vertices_in;
438 unsigned output_vertex_size_hwords;
439 unsigned output_topology;
440 unsigned control_data_header_size_hwords;
441 unsigned control_data_format;
442 int static_vertex_count;
443 int invocations;
444 bool include_primitive_id;
445 };
446
447 struct iris_depth_stencil_alpha_state;
448
449 /**
450 * Cache IDs for the in-memory program cache (ice->shaders.cache).
451 */
452 enum iris_program_cache_id {
453 IRIS_CACHE_VS = MESA_SHADER_VERTEX,
454 IRIS_CACHE_TCS = MESA_SHADER_TESS_CTRL,
455 IRIS_CACHE_TES = MESA_SHADER_TESS_EVAL,
456 IRIS_CACHE_GS = MESA_SHADER_GEOMETRY,
457 IRIS_CACHE_FS = MESA_SHADER_FRAGMENT,
458 IRIS_CACHE_CS = MESA_SHADER_COMPUTE,
459 IRIS_CACHE_BLORP,
460 };
461
462 /** @{
463 *
464 * Defines for PIPE_CONTROL operations, which trigger cache flushes,
465 * synchronization, pipelined memory writes, and so on.
466 *
467 * The bits here are not the actual hardware values. The actual fields
468 * move between various generations, so we just have flags for each
469 * potential operation, and use genxml to encode the actual packet.
470 */
/* Use unsigned shifts: (1 << 31) shifts into the sign bit of a signed int,
 * which is undefined behavior in C.  Values are unchanged. */
enum pipe_control_flags
{
   PIPE_CONTROL_FLUSH_LLC                       = (1u << 1),
   PIPE_CONTROL_LRI_POST_SYNC_OP                = (1u << 2),
   PIPE_CONTROL_STORE_DATA_INDEX                = (1u << 3),
   PIPE_CONTROL_CS_STALL                        = (1u << 4),
   PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET     = (1u << 5),
   PIPE_CONTROL_SYNC_GFDT                       = (1u << 6),
   PIPE_CONTROL_TLB_INVALIDATE                  = (1u << 7),
   PIPE_CONTROL_MEDIA_STATE_CLEAR               = (1u << 8),
   PIPE_CONTROL_WRITE_IMMEDIATE                 = (1u << 9),
   PIPE_CONTROL_WRITE_DEPTH_COUNT               = (1u << 10),
   PIPE_CONTROL_WRITE_TIMESTAMP                 = (1u << 11),
   PIPE_CONTROL_DEPTH_STALL                     = (1u << 12),
   PIPE_CONTROL_RENDER_TARGET_FLUSH             = (1u << 13),
   PIPE_CONTROL_INSTRUCTION_INVALIDATE          = (1u << 14),
   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE        = (1u << 15),
   PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE = (1u << 16),
   PIPE_CONTROL_NOTIFY_ENABLE                   = (1u << 17),
   PIPE_CONTROL_FLUSH_ENABLE                    = (1u << 18),
   PIPE_CONTROL_DATA_CACHE_FLUSH                = (1u << 19),
   PIPE_CONTROL_VF_CACHE_INVALIDATE             = (1u << 20),
   PIPE_CONTROL_CONST_CACHE_INVALIDATE          = (1u << 21),
   PIPE_CONTROL_STATE_CACHE_INVALIDATE          = (1u << 22),
   PIPE_CONTROL_STALL_AT_SCOREBOARD             = (1u << 23),
   PIPE_CONTROL_DEPTH_CACHE_FLUSH               = (1u << 24),
   PIPE_CONTROL_TILE_CACHE_FLUSH                = (1u << 25), /* Not available in Gfx20+ */
   PIPE_CONTROL_FLUSH_HDC                       = (1u << 26),
   PIPE_CONTROL_PSS_STALL_SYNC                  = (1u << 27),
   PIPE_CONTROL_L3_READ_ONLY_CACHE_INVALIDATE   = (1u << 28),
   PIPE_CONTROL_UNTYPED_DATAPORT_CACHE_FLUSH    = (1u << 29),
   PIPE_CONTROL_CCS_CACHE_FLUSH                 = (1u << 30),
   PIPE_CONTROL_L3_FABRIC_FLUSH                 = (1u << 31),
};
505
506 #define PIPE_CONTROL_CACHE_FLUSH_BITS \
507 (PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
508 PIPE_CONTROL_DATA_CACHE_FLUSH | \
509 PIPE_CONTROL_TILE_CACHE_FLUSH | \
510 PIPE_CONTROL_FLUSH_HDC | \
511 PIPE_CONTROL_UNTYPED_DATAPORT_CACHE_FLUSH | \
512 PIPE_CONTROL_RENDER_TARGET_FLUSH)
513
514 #define PIPE_CONTROL_CACHE_INVALIDATE_BITS \
515 (PIPE_CONTROL_STATE_CACHE_INVALIDATE | \
516 PIPE_CONTROL_CONST_CACHE_INVALIDATE | \
517 PIPE_CONTROL_VF_CACHE_INVALIDATE | \
518 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
519 PIPE_CONTROL_INSTRUCTION_INVALIDATE)
520
521 #define PIPE_CONTROL_L3_RO_INVALIDATE_BITS \
522 (PIPE_CONTROL_L3_READ_ONLY_CACHE_INVALIDATE | \
523 PIPE_CONTROL_CONST_CACHE_INVALIDATE)
524
525 #define PIPE_CONTROL_GRAPHICS_BITS \
526 (PIPE_CONTROL_RENDER_TARGET_FLUSH | \
527 PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
528 PIPE_CONTROL_TILE_CACHE_FLUSH | \
529 PIPE_CONTROL_DEPTH_STALL | \
530 PIPE_CONTROL_STALL_AT_SCOREBOARD | \
531 PIPE_CONTROL_PSS_STALL_SYNC | \
532 PIPE_CONTROL_VF_CACHE_INVALIDATE | \
533 PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET | \
534 PIPE_CONTROL_L3_READ_ONLY_CACHE_INVALIDATE |\
535 PIPE_CONTROL_WRITE_DEPTH_COUNT)
536
537 enum iris_predicate_state {
538 /* The first two states are used if we can determine whether to draw
539 * without having to look at the values in the query object buffer. This
540 * will happen if there is no conditional render in progress, if the query
541 * object is already completed or if something else has already added
542 * samples to the preliminary result.
543 */
544 IRIS_PREDICATE_STATE_RENDER,
545 IRIS_PREDICATE_STATE_DONT_RENDER,
546
547 /* In this case whether to draw or not depends on the result of an
548 * MI_PREDICATE command so the predicate enable bit needs to be checked.
549 */
550 IRIS_PREDICATE_STATE_USE_BIT,
551 };
552
553 /** @} */
554
555 /**
556 * An uncompiled, API-facing shader. This is the Gallium CSO for shaders.
557 * It primarily contains the NIR for the shader.
558 *
559 * Each API-facing shader can be compiled into multiple shader variants,
560 * based on non-orthogonal state dependencies, recorded in the shader key.
561 *
562 * See iris_compiled_shader, which represents a compiled shader variant.
563 */
564 struct iris_uncompiled_shader {
565 struct pipe_reference ref;
566
567 /**
568 * NIR for the shader.
569 *
570 * Even for shaders that originate as TGSI, this pointer will be non-NULL.
571 */
572 struct nir_shader *nir;
573
574 struct pipe_stream_output_info stream_output;
575
576 /* A SHA1 of the serialized NIR for the disk cache. */
577 unsigned char nir_sha1[20];
578
579 /* Hash value based on shader source program */
580 unsigned source_hash;
581
582 unsigned program_id;
583
584 /** Bitfield of (1 << IRIS_NOS_*) flags. */
585 unsigned nos;
586
587 /** Have any shader variants been compiled yet? */
588 bool compiled_once;
589
590 /* Whether shader uses atomic operations. */
591 bool uses_atomic_load_store;
592
593 /** Size (in bytes) of the kernel input data */
594 unsigned kernel_input_size;
595
596 /** Size (in bytes) of the local (shared) data passed as kernel inputs */
597 unsigned kernel_shared_size;
598
599 /** List of iris_compiled_shader variants */
600 struct list_head variants;
601
602 /** Lock for the variants list */
603 simple_mtx_t lock;
604
605 /** For parallel shader compiles */
606 struct util_queue_fence ready;
607 };
608
609 enum iris_surface_group {
610 IRIS_SURFACE_GROUP_RENDER_TARGET,
611 IRIS_SURFACE_GROUP_RENDER_TARGET_READ,
612 IRIS_SURFACE_GROUP_CS_WORK_GROUPS,
613 IRIS_SURFACE_GROUP_TEXTURE_LOW64,
614 IRIS_SURFACE_GROUP_TEXTURE_HIGH64,
615 IRIS_SURFACE_GROUP_IMAGE,
616 IRIS_SURFACE_GROUP_UBO,
617 IRIS_SURFACE_GROUP_SSBO,
618
619 IRIS_SURFACE_GROUP_COUNT,
620 };
621
622 enum {
623 /* Invalid value for a binding table index. */
624 IRIS_SURFACE_NOT_USED = 0xa0a0a0a0,
625 };
626
627 struct iris_binding_table {
628 uint32_t size_bytes;
629
630 /** Number of surfaces in each group, before compacting. */
631 uint32_t sizes[IRIS_SURFACE_GROUP_COUNT];
632
633 /** Initial offset of each group. */
634 uint32_t offsets[IRIS_SURFACE_GROUP_COUNT];
635
636 /** Mask of surfaces used in each group. */
637 uint64_t used_mask[IRIS_SURFACE_GROUP_COUNT];
638
639 uint64_t samplers_used_mask;
640
641 /** Whether the first render target is a null fb surface */
642 uint8_t use_null_rt;
643 };
644
645 /**
646 * A compiled shader variant, containing a pointer to the GPU assembly,
647 * as well as program data and other packets needed by state upload.
648 *
649 * There can be several iris_compiled_shader variants per API-level shader
650 * (iris_uncompiled_shader), due to state-based recompiles (brw_*_prog_key).
651 */
652 struct iris_compiled_shader {
653 struct pipe_reference ref;
654
655 /** Link in the iris_uncompiled_shader::variants list */
656 struct list_head link;
657
658 /** Key for this variant (but not for BLORP programs) */
659 union iris_any_prog_key key;
660
661 /**
662 * Is the variant fully compiled and ready?
663 *
664 * Variants are added to \c iris_uncompiled_shader::variants before
665 * compilation actually occurs. This signals that compilation has
666 * completed.
667 */
668 struct util_queue_fence ready;
669
670 /** Variant is ready, but compilation failed. */
671 bool compilation_failed;
672
673 /** Reference to the uploaded assembly. */
674 struct iris_state_ref assembly;
675
676 /** Pointer to the assembly in the BO's map. */
677 void *map;
678
679 /** The program data (owned by the program cache hash table) */
680 struct brw_stage_prog_data *brw_prog_data;
681 struct elk_stage_prog_data *elk_prog_data;
682
683 /** A list of system values to be uploaded as uniforms. */
684 uint32_t *system_values;
685 unsigned num_system_values;
686
687 /** Size (in bytes) of the kernel input data */
688 unsigned kernel_input_size;
689
690 /** Number of constbufs expected by the shader. */
691 unsigned num_cbufs;
692
693 /**
694 * Derived 3DSTATE_STREAMOUT and 3DSTATE_SO_DECL_LIST packets
695 * (the VUE-based information for transform feedback outputs).
696 */
697 uint32_t *streamout;
698
699 struct iris_binding_table bt;
700
701 gl_shader_stage stage;
702
703 /**
704 * Data derived from prog_data.
705 */
706 struct iris_ubo_range ubo_ranges[4];
707
708 unsigned nr_params;
709 unsigned total_scratch;
710 unsigned total_shared;
711 unsigned program_size;
712 unsigned const_data_offset;
713 unsigned dispatch_grf_start_reg;
714 bool has_ubo_pull;
715 bool use_alt_mode;
716
717 union {
718 struct iris_fs_data fs;
719 struct iris_cs_data cs;
720 struct iris_vs_data vs;
721 struct iris_tcs_data tcs;
722 struct iris_tes_data tes;
723 struct iris_gs_data gs;
724 };
725
726 /**
727 * Shader packets and other data derived from prog_data. These must be
728 * completely determined from prog_data.
729 */
730 uint8_t derived_data[0];
731 };
732
733 static inline uint64_t
KSP(const struct iris_compiled_shader * shader)734 KSP(const struct iris_compiled_shader *shader)
735 {
736 struct iris_resource *res = (void *) shader->assembly.res;
737 return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
738 }
739
/* Generate typed accessors for the per-stage data union inside
 * iris_compiled_shader.  Each use expands to iris_<FIELD>_data() and
 * iris_<FIELD>_data_const(), which assert that the shader really is of
 * the expected stage before returning the union member. */
#define DEFINE_IRIS_SHADER_DATA(TYPE, STAGE, FIELD)                       \
   static inline TYPE *                                                   \
   iris_ ## FIELD ## _data(struct iris_compiled_shader *shader)           \
   {                                                                      \
      assert(shader->stage == STAGE);                                     \
      return &shader->FIELD;                                              \
   }                                                                      \
   static inline const TYPE *                                             \
   iris_ ## FIELD ## _data_const(const struct iris_compiled_shader *shader) \
   {                                                                      \
      assert(shader->stage == STAGE);                                     \
      return &shader->FIELD;                                              \
   }

DEFINE_IRIS_SHADER_DATA(struct iris_fs_data,  MESA_SHADER_FRAGMENT,  fs)
DEFINE_IRIS_SHADER_DATA(struct iris_cs_data,  MESA_SHADER_COMPUTE,   cs)
DEFINE_IRIS_SHADER_DATA(struct iris_vs_data,  MESA_SHADER_VERTEX,    vs)
DEFINE_IRIS_SHADER_DATA(struct iris_tcs_data, MESA_SHADER_TESS_CTRL, tcs)
DEFINE_IRIS_SHADER_DATA(struct iris_tes_data, MESA_SHADER_TESS_EVAL, tes)
DEFINE_IRIS_SHADER_DATA(struct iris_gs_data,  MESA_SHADER_GEOMETRY,  gs)

#undef DEFINE_IRIS_SHADER_DATA
762
763 static inline struct iris_vue_data *
764 iris_vue_data(struct iris_compiled_shader *shader)
765 {
766 switch (shader->stage) {
767 case MESA_SHADER_VERTEX: return &shader->vs.base;
768 case MESA_SHADER_TESS_CTRL: return &shader->tcs.base;
769 case MESA_SHADER_TESS_EVAL: return &shader->tes.base;
770 case MESA_SHADER_GEOMETRY: return &shader->gs.base;
771 default:
772 unreachable("invalid shader stage for vue prog data");
773 return NULL;
774 }
775 }
776
777 /**
778 * API context state that is replicated per shader stage.
779 */
780 struct iris_shader_state {
781 /** Uniform Buffers */
782 struct pipe_shader_buffer constbuf[PIPE_MAX_CONSTANT_BUFFERS];
783 struct iris_state_ref constbuf_surf_state[PIPE_MAX_CONSTANT_BUFFERS];
784
785 bool sysvals_need_upload;
786
787 /** Shader Storage Buffers */
788 struct pipe_shader_buffer ssbo[PIPE_MAX_SHADER_BUFFERS];
789 struct iris_state_ref ssbo_surf_state[PIPE_MAX_SHADER_BUFFERS];
790
791 /** Shader Storage Images (image load store) */
792 struct iris_image_view image[PIPE_MAX_SHADER_IMAGES];
793
794 struct iris_state_ref sampler_table;
795 struct iris_sampler_state *samplers[IRIS_MAX_SAMPLERS];
796 struct iris_sampler_view *textures[IRIS_MAX_TEXTURES];
797
798 /** Bitfield of which constant buffers are bound (non-null). */
799 uint32_t bound_cbufs;
800 uint32_t dirty_cbufs;
801
802 /** Bitfield of which image views are bound (non-null). */
803 uint64_t bound_image_views;
804
805 /** Bitfield of which sampler views are bound (non-null). */
806 BITSET_DECLARE(bound_sampler_views, IRIS_MAX_TEXTURES);
807
808 /** Bitfield of which shader storage buffers are bound (non-null). */
809 uint32_t bound_ssbos;
810
811 /** Bitfield of which shader storage buffers are writable. */
812 uint32_t writable_ssbos;
813
814 /** Array of aux usages used for our shader's images in the current draw */
815 enum isl_aux_usage image_aux_usage[PIPE_MAX_SHADER_IMAGES];
816 };
817
818 /**
819 * Gallium CSO for stream output (transform feedback) targets.
820 */
821 struct iris_stream_output_target {
822 struct pipe_stream_output_target base;
823
824 /** Storage holding the offset where we're writing in the buffer */
825 struct iris_state_ref offset;
826
827 /** Stride (bytes-per-vertex) during this transform feedback operation */
828 uint16_t stride;
829
830 /** Does the next 3DSTATE_SO_BUFFER need to zero the offsets? */
831 bool zero_offset;
832 };
833
834 enum iris_context_priority {
835 IRIS_CONTEXT_MEDIUM_PRIORITY = 0,
836 IRIS_CONTEXT_LOW_PRIORITY,
837 IRIS_CONTEXT_HIGH_PRIORITY
838 };
839
840 /**
841 * The API context (derived from pipe_context).
842 *
843 * Most driver state is tracked here.
844 */
845 struct iris_context {
846 struct pipe_context ctx;
847 struct threaded_context *thrctx;
848
849 /** A debug callback for KHR_debug output. */
850 struct util_debug_callback dbg;
851
852 /** Whether the context protected (through EGL_EXT_protected_content) */
853 bool protected;
854
855 /** Whether a banned context was already signalled */
856 bool context_reset_signaled;
857
858 /** A device reset status callback for notifying that the GPU is hosed. */
859 struct pipe_device_reset_callback reset;
860
861 /** A set of dmabuf resources dirtied beyond their default aux-states. */
862 struct set *dirty_dmabufs;
863
864 /** Slab allocator for iris_transfer_map objects. */
865 struct slab_child_pool transfer_pool;
866
867 /** Slab allocator for threaded_context's iris_transfer_map objects */
868 struct slab_child_pool transfer_pool_unsync;
869
870 struct blorp_context blorp;
871
872 struct iris_batch batches[IRIS_BATCH_COUNT];
873 enum iris_context_priority priority;
874 bool has_engines_context; /* i915 specific */
875
876 struct u_upload_mgr *query_buffer_uploader;
877
878 struct intel_ds_device ds;
879
880 struct {
881 struct {
882 /**
883 * Either the value of BaseVertex for indexed draw calls or the value
884 * of the argument <first> for non-indexed draw calls.
885 */
886 int firstvertex;
887 int baseinstance;
888 } params;
889
890 /**
891 * Are the above values the ones stored in the draw_params buffer?
892 * If so, we can compare them against new values to see if anything
893 * changed. If not, we need to assume they changed.
894 */
895 bool params_valid;
896
897 /**
898 * Resource and offset that stores draw_parameters from the indirect
899 * buffer or to the buffer that stures the previous values for non
900 * indirect draws.
901 */
902 struct iris_state_ref draw_params;
903
904 struct {
905 /**
906 * The value of DrawID. This always comes in from it's own vertex
907 * buffer since it's not part of the indirect draw parameters.
908 */
909 int drawid;
910
911 /**
912 * Stores if an indexed or non-indexed draw (~0/0). Useful to
913 * calculate BaseVertex as an AND of firstvertex and is_indexed_draw.
914 */
915 int is_indexed_draw;
916 } derived_params;
917
918 /**
919 * Resource and offset used for GL_ARB_shader_draw_parameters which
920 * contains parameters that are not present in the indirect buffer as
921 * drawid and is_indexed_draw. They will go in their own vertex element.
922 */
923 struct iris_state_ref derived_draw_params;
924
925 struct {
926 /**
927 * Generation fragment shader
928 */
929 struct iris_compiled_shader *shader;
930
931 /**
932 * Ring buffer where to generate indirect draw commands
933 */
934 struct iris_bo *ring_bo;
935
936 /**
937 * Allocated iris_gen_indirect_params
938 */
939 struct iris_state_ref params;
940
941 /**
942 * Vertices used to dispatch the generated fragment shaders
943 */
944 struct iris_state_ref vertices;
945 } generation;
946 } draw;
947
948 struct {
949 struct iris_uncompiled_shader *uncompiled[MESA_SHADER_STAGES];
950 struct iris_compiled_shader *prog[MESA_SHADER_STAGES];
951 struct iris_compiled_shader *last_vue_shader;
952 struct {
953 struct intel_urb_config cfg;
954 bool constrained;
955 } urb;
956
957 /** Last urb emitted by the driver. */
958 struct intel_urb_config last_urb;
959
960 /** Uploader for shader assembly from the driver thread */
961 struct u_upload_mgr *uploader_driver;
962 /** Uploader for shader assembly from the threaded context */
963 struct u_upload_mgr *uploader_unsync;
964 struct hash_table *cache;
965
966 /** Is a GS or TES outputting points or lines? */
967 bool output_topology_is_points_or_lines;
968
969 /**
970 * Scratch buffers for various sizes and stages.
971 *
972 * Indexed by the "Per-Thread Scratch Space" field's 4-bit encoding,
973 * and shader stage.
974 */
975 struct iris_bo *scratch_bos[1 << 4][MESA_SHADER_STAGES];
976
977 /**
978 * Scratch buffer surface states on Gfx12.5+
979 */
980 struct iris_state_ref scratch_surfs[1 << 4];
981 } shaders;
982
   /** Performance query context (intel_perf). */
   struct intel_perf_context *perf_ctx;

   /** Frame number for u_trace */
   struct {
      uint32_t begin_frame;
      uint32_t end_frame;
      /** Timestamp of the last fully-recorded frame boundary —
       *  TODO confirm exact semantics against iris_utrace users. */
      uint64_t last_full_timestamp;
      /** Last emitted compute walker command (opaque pointer). */
      void *last_compute_walker;
   } utrace;

   /** Frame number for debug prints */
   uint32_t frame;

   /** Track draw call count for adding GPU breakpoint on 3DPRIMITIVE */
   uint32_t draw_call_count;
998
   /**
    * Current pipeline state, plus dirty-bit tracking used to decide what
    * must be re-emitted into the batch.
    */
   struct {
      /** Dirty-bit mask (presumably IRIS_DIRTY_* flags — confirm). */
      uint64_t dirty;
      /** Per-stage dirty-bit mask (presumably IRIS_STAGE_DIRTY_*). */
      uint64_t stage_dirty;
      /** Stage-dirty bits to set when each non-orthogonal state changes. */
      uint64_t stage_dirty_for_nos[IRIS_NOS_COUNT];

      unsigned num_viewports;
      unsigned sample_mask;
      /* Bound constant-state objects (CSOs). */
      struct iris_blend_state *cso_blend;
      struct iris_rasterizer_state *cso_rast;
      struct iris_depth_stencil_alpha_state *cso_zsa;
      struct iris_vertex_element_state *cso_vertex_elements;
      struct pipe_blend_color blend_color;
      struct pipe_poly_stipple poly_stipple;
      struct pipe_viewport_state viewports[IRIS_MAX_VIEWPORTS];
      struct pipe_scissor_state scissors[IRIS_MAX_VIEWPORTS];
      struct pipe_stencil_ref stencil_ref;
      struct pipe_framebuffer_state framebuffer;
      struct pipe_clip_state clip_planes;
      /* width and height treated like x2 and y2 */
      struct pipe_box render_area;

      /* Default tessellation levels (used when no TCS provides them). */
      float default_outer_level[4];
      float default_inner_level[2];

      /** Bitfield of which vertex buffers are bound (non-null). */
      uint64_t bound_vertex_buffers;

      uint8_t patch_vertices;
      bool primitive_restart;
      unsigned cut_index;
      enum mesa_prim prim_mode:8;
      bool prim_is_points_or_lines;
      uint8_t vertices_per_patch;

      bool window_space_position;

      /** The last compute group size */
      uint32_t last_block[3];

      /** The last compute grid size */
      uint32_t last_grid[3];
      /** The last compute grid dimensions */
      uint32_t last_grid_dim;
      /** Reference to the BO containing the compute grid size */
      struct iris_state_ref grid_size;
      /** Reference to the SURFACE_STATE for the compute grid resource */
      struct iris_state_ref grid_surf_state;

      /**
       * Array of aux usages for drawing, altered to account for any
       * self-dependencies from resources bound for sampling and rendering.
       */
      enum isl_aux_usage draw_aux_usage[IRIS_MAX_DRAW_BUFFERS];

      /** Aux usage of the fb's depth buffer (which may or may not exist). */
      enum isl_aux_usage hiz_usage;

      enum intel_urb_deref_block_size urb_deref_block_size;

      /** Are depth writes enabled? (Depth buffer may or may not exist.) */
      bool depth_writes_enabled;

      /** Are stencil writes enabled? (Stencil buffer may or may not exist.) */
      bool stencil_writes_enabled;

      /** Current/upcoming ds_write_state for Wa_18019816803. */
      bool ds_write_state;

      /** State tracking for Wa_14018912822. */
      bool color_blend_zero;
      bool alpha_blend_zero;

      /** State tracking for Wa_18020335297. */
      bool viewport_ptr_set;

      /** State for Wa_14015055625, Wa_14019166699 */
      bool uses_primitive_id;

      /** Do we have integer RT in current framebuffer state? */
      bool has_integer_rt;

      /** GenX-specific current state */
      struct iris_genx_state *genx;

      /** Per-stage binding/resource state. */
      struct iris_shader_state shaders[MESA_SHADER_STAGES];

      /** Does the vertex shader use shader draw parameters? */
      bool vs_uses_draw_params;
      bool vs_uses_derived_draw_params;
      bool vs_needs_sgvs_element;

      /** Does the vertex shader use the edge flag? */
      bool vs_needs_edge_flag;

      /** Do any samplers need border color? One bit per shader stage. */
      uint8_t need_border_colors;

      /** Global resource bindings */
      struct pipe_resource *global_bindings[IRIS_MAX_GLOBAL_BINDINGS];

      /* Stream output (transform feedback) targets. */
      struct pipe_stream_output_target *so_target[PIPE_MAX_SO_BUFFERS];
      bool streamout_active;

      bool statistics_counters_enabled;

      /** Current conditional rendering mode */
      enum iris_predicate_state predicate;

      /**
       * Query BO with a MI_PREDICATE_RESULT snapshot calculated on the
       * render context that needs to be uploaded to the compute context.
       */
      struct iris_bo *compute_predicate;

      /** Is a PIPE_QUERY_PRIMITIVES_GENERATED query active? */
      bool prims_generated_query_active;

      /** Is a PIPE_QUERY_OCCLUSION_COUNTER query active? */
      bool occlusion_query_active;

      /** 3DSTATE_STREAMOUT and 3DSTATE_SO_DECL_LIST packets */
      uint32_t *streamout;

      /** The SURFACE_STATE for a 1x1x1 null surface. */
      struct iris_state_ref unbound_tex;

      /** The SURFACE_STATE for a framebuffer-sized null surface. */
      struct iris_state_ref null_fb;

      /* Uploaders for streamed (transient) state. */
      struct u_upload_mgr *surface_uploader;
      struct u_upload_mgr *scratch_surface_uploader;
      struct u_upload_mgr *dynamic_uploader;

      struct iris_binder binder;

      /** The high 16-bits of the last VBO/index buffer addresses */
      uint16_t last_vbo_high_bits[33];
      uint16_t last_index_bo_high_bits;

      /**
       * Resources containing streamed state which our render context
       * currently points to. Used to re-add these to the validation
       * list when we start a new batch and haven't resubmitted commands.
       */
      struct {
         struct pipe_resource *cc_vp;
         struct pipe_resource *sf_cl_vp;
         struct pipe_resource *color_calc;
         struct pipe_resource *scissor;
         struct pipe_resource *blend;
         struct pipe_resource *index_buffer;
         struct pipe_resource *cs_thread_ids;
         struct pipe_resource *cs_desc;
      } last_res;

      /** Records the size of variable-length state for INTEL_DEBUG=bat */
      struct hash_table_u64 *sizes;

      /** Last rendering scale argument provided to genX(emit_hashing_mode). */
      unsigned current_hash_scale;

      /** Resource holding the pixel pipe hashing tables. */
      struct pipe_resource *pixel_hashing_tables;

      bool use_tbimr;
   } state;
1165 };
1166
1167 /**
1168 * Push constant data handed over to the indirect draw generation shader
1169 */
/**
 * Push constant data handed over to the indirect draw generation shader.
 *
 * NOTE: this layout is consumed by a shader; do not reorder or resize
 * fields without updating the generation shader accordingly.
 */
struct iris_gen_indirect_params {
   /**
    * Address of iris_context:draw:generation:ring_bo
    */
   uint64_t generated_cmds_addr;
   /**
    * Address of indirect data to draw with
    */
   uint64_t indirect_data_addr;
   /**
    * Address inside iris_context:draw:generation:ring_bo where to draw ids
    */
   uint64_t draw_id_addr;
   /**
    * Address of the indirect count (can be null, in which case max_draw_count
    * is used)
    */
   uint64_t draw_count_addr;
   /**
    * Address to jump to in order to generate more draws
    */
   uint64_t gen_addr;
   /**
    * Address to jump to in order to end generated draws
    */
   uint64_t end_addr;
   /**
    * Stride between the indirect draw data
    */
   uint32_t indirect_data_stride;
   /**
    * Base index of the current generated draws in the ring buffer (increments
    * by ring_count)
    */
   uint32_t draw_base;
   /**
    * Maximum number of generated draws if draw_count_addr is null
    */
   uint32_t max_draw_count;
   /**
    * bits 0-7: ANV_GENERATED_FLAG_* (generation shader shared with the
    *           Vulkan driver — verify against the shader source)
    * bits 8-15: vertex buffer mocs
    * bits 16-23: stride between generated commands
    */
   uint32_t flags;
   /**
    * Number of items to generate in the ring buffer
    */
   uint32_t ring_count;
};
1220
/**
 * Report a performance caveat: printed to stderr under INTEL_DEBUG=perf,
 * and forwarded to the state tracker's debug callback (dbg) when set.
 * Wrapped in do/while(0) so it behaves as a single statement.
 */
#define perf_debug(dbg, ...) do {                      \
   if (INTEL_DEBUG(DEBUG_PERF))                        \
      dbg_printf(__VA_ARGS__);                         \
   if (unlikely(dbg))                                  \
      util_debug_message(dbg, PERF_INFO, __VA_ARGS__); \
} while(0)
1227
/* iris_context.c */
struct pipe_context *
iris_create_context(struct pipe_screen *screen, void *priv, unsigned flags);
void iris_destroy_context(struct pipe_context *ctx);

/* Re-emit all state after a GPU hang invalidated the context. */
void iris_lost_context_state(struct iris_batch *batch);

void iris_mark_dirty_dmabuf(struct iris_context *ice,
                            struct pipe_resource *res);
void iris_flush_dirty_dmabufs(struct iris_context *ice);

/* Hook up pipe_context vtable entries implemented in other files. */
void iris_init_blit_functions(struct pipe_context *ctx);
void iris_init_clear_functions(struct pipe_context *ctx);
void iris_init_program_functions(struct pipe_context *ctx);
void iris_init_screen_program_functions(struct pipe_screen *pscreen);
void iris_init_resource_functions(struct pipe_context *ctx);
void iris_init_perfquery_functions(struct pipe_context *ctx);
void iris_update_compiled_shaders(struct iris_context *ice);
void iris_update_compiled_compute_shader(struct iris_context *ice);
void iris_fill_cs_push_const_buffer(struct iris_screen *screen,
                                    struct iris_compiled_shader *shader,
                                    unsigned threads,
                                    uint32_t *dst);
1250
1251
/* iris_blit.c */
#define IRIS_BLORP_RELOC_FLAGS_EXEC_OBJECT_WRITE (1 << 2)

/* Fill a blorp_surf describing p_res at the given mip level. */
void iris_blorp_surf_for_resource(struct iris_batch *batch,
                                  struct blorp_surf *surf,
                                  struct pipe_resource *p_res,
                                  enum isl_aux_usage aux_usage,
                                  unsigned level,
                                  bool is_render_target);
/* Copy a box of src (at src_level) into dst (at dst_level) via blorp. */
void iris_copy_region(struct blorp_context *blorp,
                      struct iris_batch *batch,
                      struct pipe_resource *dst,
                      unsigned dst_level,
                      unsigned dstx, unsigned dsty, unsigned dstz,
                      struct pipe_resource *src,
                      unsigned src_level,
                      const struct pipe_box *src_box);
1269
1270 static inline enum blorp_batch_flags
iris_blorp_flags_for_batch(struct iris_batch * batch)1271 iris_blorp_flags_for_batch(struct iris_batch *batch)
1272 {
1273 if (batch->name == IRIS_BATCH_COMPUTE)
1274 return BLORP_BATCH_USE_COMPUTE;
1275
1276 if (batch->name == IRIS_BATCH_BLITTER)
1277 return BLORP_BATCH_USE_BLITTER;
1278
1279 return 0;
1280 }
1281
1282 static inline isl_surf_usage_flags_t
iris_blorp_batch_usage(struct iris_batch * batch,bool is_dest)1283 iris_blorp_batch_usage(struct iris_batch *batch, bool is_dest)
1284 {
1285 switch (batch->name) {
1286 case IRIS_BATCH_RENDER:
1287 return is_dest ? ISL_SURF_USAGE_RENDER_TARGET_BIT : ISL_SURF_USAGE_TEXTURE_BIT;
1288 case IRIS_BATCH_COMPUTE:
1289 return is_dest ? ISL_SURF_USAGE_STORAGE_BIT : ISL_SURF_USAGE_TEXTURE_BIT;
1290 case IRIS_BATCH_BLITTER:
1291 return is_dest ? ISL_SURF_USAGE_BLITTER_DST_BIT : ISL_SURF_USAGE_BLITTER_SRC_BIT;
1292 default:
1293 unreachable("Unhandled batch type");
1294 }
1295 }
1296
/* iris_draw.c */

void iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info,
                   unsigned drawid_offset,
                   const struct pipe_draw_indirect_info *indirect,
                   const struct pipe_draw_start_count_bias *draws,
                   unsigned num_draws);
void iris_launch_grid(struct pipe_context *, const struct pipe_grid_info *);

/* iris_pipe_control.c */

void iris_emit_pipe_control_flush(struct iris_batch *batch,
                                  const char *reason, uint32_t flags);
void iris_emit_pipe_control_write(struct iris_batch *batch,
                                  const char *reason, uint32_t flags,
                                  struct iris_bo *bo, uint32_t offset,
                                  uint64_t imm);
void iris_emit_end_of_pipe_sync(struct iris_batch *batch,
                                const char *reason, uint32_t flags);
/* Emit whatever barriers are needed before bo is accessed in access domain. */
void iris_emit_buffer_barrier_for(struct iris_batch *batch,
                                  struct iris_bo *bo,
                                  enum iris_domain access);
void iris_flush_all_caches(struct iris_batch *batch);
1320
/**
 * If the always_flush_cache driconf option is set, flush all caches —
 * used as a heavy-handed debugging aid around draws/dispatches.
 *
 * Wrapped in do/while(0) so the macro expands to a single statement;
 * the previous bare `if` form was unsafe inside unbraced if/else bodies
 * (dangling-else hazard).
 */
#define iris_handle_always_flush_cache(batch)                    \
   do {                                                          \
      if (unlikely(batch->screen->driconf.always_flush_cache))   \
         iris_flush_all_caches(batch);                           \
   } while (0)
1324
void iris_init_flush_functions(struct pipe_context *ctx);

/* iris_program.c */
void iris_compiler_init(struct iris_screen *screen);
void iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
                                     struct pipe_shader_buffer *buf,
                                     struct iris_state_ref *surf_state,
                                     isl_surf_usage_flags_t usage);
const struct shader_info *iris_get_shader_info(const struct iris_context *ice,
                                               gl_shader_stage stage);
/* Get (allocating if needed) a scratch BO for the given size/stage. */
struct iris_bo *iris_get_scratch_space(struct iris_context *ice,
                                       unsigned per_thread_scratch,
                                       gl_shader_stage stage);
const struct iris_state_ref *iris_get_scratch_surf(struct iris_context *ice,
                                                   unsigned per_thread_scratch);
/* Convert between (surface group, index) and binding table indices. */
uint32_t iris_group_index_to_bti(const struct iris_binding_table *bt,
                                 enum iris_surface_group group,
                                 uint32_t index);
uint32_t iris_bti_to_group_index(const struct iris_binding_table *bt,
                                 enum iris_surface_group group,
                                 uint32_t bti);
/* Attach backend (brw/elk) prog_data to a compiled shader variant. */
void iris_apply_brw_prog_data(struct iris_compiled_shader *shader,
                              struct brw_stage_prog_data *prog_data);
void iris_apply_elk_prog_data(struct iris_compiled_shader *shader,
                              struct elk_stage_prog_data *prog_data);
struct intel_cs_dispatch_info
iris_get_cs_dispatch_info(const struct intel_device_info *devinfo,
                          const struct iris_compiled_shader *shader,
                          const uint32_t block[3]);
unsigned
iris_cs_push_const_total_size(const struct iris_compiled_shader *shader,
                              unsigned threads);
uint32_t
iris_fs_barycentric_modes(const struct iris_compiled_shader *shader,
                          enum intel_msaa_flags pushed_msaa_flags);
bool iris_use_tcs_multi_patch(struct iris_screen *screen);
bool iris_indirect_ubos_use_sampler(struct iris_screen *screen);
const void *iris_get_compiler_options(struct pipe_screen *pscreen,
                                      enum pipe_shader_ir ir,
                                      enum pipe_shader_type pstage);
1365
/* iris_disk_cache.c */

void iris_disk_cache_store(struct disk_cache *cache,
                           const struct iris_uncompiled_shader *ish,
                           const struct iris_compiled_shader *shader,
                           const void *prog_key,
                           uint32_t prog_key_size);
/* Returns true and fills shader on a disk-cache hit. */
bool
iris_disk_cache_retrieve(struct iris_screen *screen,
                         struct u_upload_mgr *uploader,
                         struct iris_uncompiled_shader *ish,
                         struct iris_compiled_shader *shader,
                         const void *prog_key,
                         uint32_t prog_key_size);

/* iris_program_cache.c */

void iris_init_program_cache(struct iris_context *ice);
void iris_destroy_program_cache(struct iris_context *ice);
struct iris_compiled_shader *iris_find_cached_shader(struct iris_context *ice,
                                                     enum iris_program_cache_id,
                                                     uint32_t key_size,
                                                     const void *key);

struct iris_compiled_shader *iris_create_shader_variant(const struct iris_screen *,
                                                        void *mem_ctx,
                                                        gl_shader_stage stage,
                                                        enum iris_program_cache_id cache_id,
                                                        uint32_t key_size,
                                                        const void *key);

void iris_finalize_program(struct iris_compiled_shader *shader,
                           uint32_t *streamout,
                           uint32_t *system_values,
                           unsigned num_system_values,
                           unsigned kernel_input_size,
                           unsigned num_cbufs,
                           const struct iris_binding_table *bt);

void iris_upload_shader(struct iris_screen *screen,
                        struct iris_uncompiled_shader *,
                        struct iris_compiled_shader *,
                        struct hash_table *driver_ht,
                        struct u_upload_mgr *uploader,
                        enum iris_program_cache_id,
                        uint32_t key_size,
                        const void *key,
                        const void *assembly);
void iris_delete_shader_variant(struct iris_compiled_shader *shader);

void iris_destroy_shader_state(struct pipe_context *ctx, void *state);
1417
1418 static inline void
iris_uncompiled_shader_reference(struct pipe_context * ctx,struct iris_uncompiled_shader ** dst,struct iris_uncompiled_shader * src)1419 iris_uncompiled_shader_reference(struct pipe_context *ctx,
1420 struct iris_uncompiled_shader **dst,
1421 struct iris_uncompiled_shader *src)
1422 {
1423 if (*dst == src)
1424 return;
1425
1426 struct iris_uncompiled_shader *old_dst = *dst;
1427
1428 if (pipe_reference(old_dst != NULL ? &old_dst->ref : NULL,
1429 src != NULL ? &src->ref : NULL)) {
1430 iris_destroy_shader_state(ctx, *dst);
1431 }
1432
1433 *dst = src;
1434 }
1435
1436 static inline void
iris_shader_variant_reference(struct iris_compiled_shader ** dst,struct iris_compiled_shader * src)1437 iris_shader_variant_reference(struct iris_compiled_shader **dst,
1438 struct iris_compiled_shader *src)
1439 {
1440 struct iris_compiled_shader *old_dst = *dst;
1441
1442 if (pipe_reference(old_dst ? &old_dst->ref: NULL, src ? &src->ref : NULL))
1443 iris_delete_shader_variant(old_dst);
1444
1445 *dst = src;
1446 }
1447
/* blorp shader-cache hooks (implemented in the program cache). */
bool iris_blorp_lookup_shader(struct blorp_batch *blorp_batch,
                              const void *key,
                              uint32_t key_size,
                              uint32_t *kernel_out,
                              void *prog_data_out);
bool iris_blorp_upload_shader(struct blorp_batch *blorp_batch, uint32_t stage,
                              const void *key, uint32_t key_size,
                              const void *kernel, uint32_t kernel_size,
                              const void *prog_data,
                              uint32_t prog_data_size,
                              uint32_t *kernel_out,
                              void *prog_data_out);

/* Compile (if not done yet) the indirect draw generation shader. */
void iris_ensure_indirect_generation_shader(struct iris_batch *batch);
1462
1463
/* iris_resolve.c */

void iris_predraw_resolve_inputs(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 bool *draw_aux_buffer_disabled,
                                 gl_shader_stage stage,
                                 bool consider_framebuffer);
void iris_predraw_resolve_framebuffer(struct iris_context *ice,
                                      struct iris_batch *batch,
                                      bool *draw_aux_buffer_disabled);
void iris_predraw_flush_buffers(struct iris_context *ice,
                                struct iris_batch *batch,
                                gl_shader_stage stage);
void iris_postdraw_update_resolve_tracking(struct iris_context *ice);
void iris_postdraw_update_image_resolve_tracking(struct iris_context *ice,
                                                 gl_shader_stage stage);
/* Driver query enumeration (pipe_screen hooks). */
int iris_get_driver_query_info(struct pipe_screen *pscreen, unsigned index,
                               struct pipe_driver_query_info *info);
int iris_get_driver_query_group_info(struct pipe_screen *pscreen,
                                     unsigned index,
                                     struct pipe_driver_query_group_info *info);
1490 static const bool
iris_execute_indirect_draw_supported(const struct iris_context * ice,const struct pipe_draw_indirect_info * indirect,const struct pipe_draw_info * draw)1491 iris_execute_indirect_draw_supported(const struct iris_context *ice,
1492 const struct pipe_draw_indirect_info *indirect,
1493 const struct pipe_draw_info *draw)
1494 {
1495 const struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1496 const struct iris_vs_data *vs_data =
1497 iris_vs_data(ice->shaders.prog[MESA_SHADER_VERTEX]);
1498 const size_t struct_size = draw->index_size ?
1499 sizeof(uint32_t) * 5 :
1500 sizeof(uint32_t) * 4;
1501 const bool aligned_stride =
1502 indirect && (indirect->stride == 0 || indirect->stride == struct_size);
1503
1504 return (screen->devinfo->has_indirect_unroll &&
1505 aligned_stride &&
1506 (indirect &&
1507 !indirect->count_from_stream_output) &&
1508 !(vs_data->uses_firstvertex ||
1509 vs_data->uses_baseinstance ||
1510 vs_data->uses_drawid));
1511 }
1512
/*
 * Pull in the genX-prefixed prototypes.  When compiled inside a
 * per-generation file, genX is already defined and we include once;
 * otherwise instantiate the prototypes for every supported generation.
 */
#ifdef genX
#  include "iris_genx_protos.h"
#else
#  define genX(x) gfx8_##x
#  include "iris_genx_protos.h"
#  undef genX
#  define genX(x) gfx9_##x
#  include "iris_genx_protos.h"
#  undef genX
#  define genX(x) gfx11_##x
#  include "iris_genx_protos.h"
#  undef genX
#  define genX(x) gfx12_##x
#  include "iris_genx_protos.h"
#  undef genX
#  define genX(x) gfx125_##x
#  include "iris_genx_protos.h"
#  undef genX
#  define genX(x) gfx20_##x
#  include "iris_genx_protos.h"
#  undef genX
#  define genX(x) gfx30_##x
#  include "iris_genx_protos.h"
#  undef genX
#endif
1538
1539 #endif
1540