1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 #ifndef IRIS_CONTEXT_H
24 #define IRIS_CONTEXT_H
25 
26 #include "pipe/p_context.h"
27 #include "pipe/p_state.h"
28 #include "util/perf/u_trace.h"
29 #include "util/set.h"
30 #include "util/slab.h"
31 #include "util/u_debug.h"
32 #include "util/macros.h"
33 #include "util/u_threaded_context.h"
34 #include "intel/blorp/blorp.h"
35 #include "intel/dev/intel_debug.h"
36 #include "intel/common/intel_l3_config.h"
37 #include "intel/compiler/intel_shader_enums.h"
38 #include "intel/ds/intel_driver_ds.h"
39 #include "iris_batch.h"
40 #include "iris_binder.h"
41 #include "iris_fence.h"
42 #include "iris_resource.h"
43 #include "iris_screen.h"
44 
45 struct iris_bo;
46 struct iris_context;
47 struct blorp_batch;
48 struct blorp_params;
49 
50 #define IRIS_MAX_DRAW_BUFFERS 8
51 #define IRIS_MAX_SOL_BINDINGS 64
52 
53 #define IRIS_MAX_TEXTURE_BUFFER_SIZE (1 << 27)
54 /* IRIS_MAX_ABOS and IRIS_MAX_SSBOS must be the same. */
55 #define IRIS_MAX_ABOS 16
56 #define IRIS_MAX_SSBOS 16
57 #define IRIS_MAX_VIEWPORTS 16
58 #define IRIS_MAX_CLIP_PLANES 8
59 #define IRIS_MAX_GLOBAL_BINDINGS 128
60 
61 enum {
62    DRI_CONF_BO_REUSE_DISABLED,
63    DRI_CONF_BO_REUSE_ALL
64 };
65 
66 enum iris_param_domain {
67    ELK_PARAM_DOMAIN_BUILTIN = 0,
68    ELK_PARAM_DOMAIN_IMAGE,
69 };
70 
71 #define ELK_PARAM(domain, val)   (ELK_PARAM_DOMAIN_##domain << 24 | (val))
72 #define ELK_PARAM_DOMAIN(param)  ((uint32_t)(param) >> 24)
73 #define ELK_PARAM_VALUE(param)   ((uint32_t)(param) & 0x00ffffff)
74 #define ELK_PARAM_IMAGE(idx, offset) ELK_PARAM(IMAGE, ((idx) << 8) | (offset))
75 #define ELK_PARAM_IMAGE_IDX(value)   (ELK_PARAM_VALUE(value) >> 8)
76 #define ELK_PARAM_IMAGE_OFFSET(value) (ELK_PARAM_VALUE(value) & 0xf)
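
/* Worked example (illustrative only): ELK_PARAM_IMAGE(2, 1) packs
 * ELK_PARAM_DOMAIN_IMAGE into bits 24-31 and ((2 << 8) | 1) into the low
 * 24 bits, producing 0x01000201.  Decoding recovers the pieces:
 *
 *    ELK_PARAM_DOMAIN(0x01000201)       == ELK_PARAM_DOMAIN_IMAGE
 *    ELK_PARAM_IMAGE_IDX(0x01000201)    == 2
 *    ELK_PARAM_IMAGE_OFFSET(0x01000201) == 1
 */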
77 
78 /**
79  * Dirty flags.  When state changes, we flag some combination of these
80  * to indicate that particular GPU commands need to be re-emitted.
81  *
82  * Each bit typically corresponds to a single 3DSTATE_* command packet, but
83  * in rare cases they map to a group of related packets that need to be
84  * emitted together.
85  *
86  * See iris_upload_render_state().
87  */
88 #define IRIS_DIRTY_COLOR_CALC_STATE               (1ull <<  0)
89 #define IRIS_DIRTY_POLYGON_STIPPLE                (1ull <<  1)
90 #define IRIS_DIRTY_SCISSOR_RECT                   (1ull <<  2)
91 #define IRIS_DIRTY_WM_DEPTH_STENCIL               (1ull <<  3)
92 #define IRIS_DIRTY_CC_VIEWPORT                    (1ull <<  4)
93 #define IRIS_DIRTY_SF_CL_VIEWPORT                 (1ull <<  5)
94 #define IRIS_DIRTY_PS_BLEND                       (1ull <<  6)
95 #define IRIS_DIRTY_BLEND_STATE                    (1ull <<  7)
96 #define IRIS_DIRTY_RASTER                         (1ull <<  8)
97 #define IRIS_DIRTY_CLIP                           (1ull <<  9)
98 #define IRIS_DIRTY_SBE                            (1ull << 10)
99 #define IRIS_DIRTY_LINE_STIPPLE                   (1ull << 11)
100 #define IRIS_DIRTY_VERTEX_ELEMENTS                (1ull << 12)
101 #define IRIS_DIRTY_MULTISAMPLE                    (1ull << 13)
102 #define IRIS_DIRTY_VERTEX_BUFFERS                 (1ull << 14)
103 #define IRIS_DIRTY_SAMPLE_MASK                    (1ull << 15)
104 #define IRIS_DIRTY_URB                            (1ull << 16)
105 #define IRIS_DIRTY_DEPTH_BUFFER                   (1ull << 17)
106 #define IRIS_DIRTY_WM                             (1ull << 18)
107 #define IRIS_DIRTY_SO_BUFFERS                     (1ull << 19)
108 #define IRIS_DIRTY_SO_DECL_LIST                   (1ull << 20)
109 #define IRIS_DIRTY_STREAMOUT                      (1ull << 21)
110 #define IRIS_DIRTY_VF_SGVS                        (1ull << 22)
111 #define IRIS_DIRTY_VF                             (1ull << 23)
112 #define IRIS_DIRTY_VF_TOPOLOGY                    (1ull << 24)
113 #define IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES    (1ull << 25)
114 #define IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES   (1ull << 26)
115 #define IRIS_DIRTY_VF_STATISTICS                  (1ull << 27)
116 #define IRIS_DIRTY_PMA_FIX                        (1ull << 28)
117 #define IRIS_DIRTY_DEPTH_BOUNDS                   (1ull << 29)
118 #define IRIS_DIRTY_RENDER_BUFFER                  (1ull << 30)
119 #define IRIS_DIRTY_STENCIL_REF                    (1ull << 31)
120 #define IRIS_DIRTY_VERTEX_BUFFER_FLUSHES          (1ull << 32)
121 #define IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES     (1ull << 33)
122 #define IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES    (1ull << 34)
123 #define IRIS_DIRTY_VFG                            (1ull << 35)
124 #define IRIS_DIRTY_DS_WRITE_ENABLE                (1ull << 36)
125 
126 #define IRIS_ALL_DIRTY_FOR_COMPUTE (IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES | \
127                                     IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES)
128 
129 #define IRIS_ALL_DIRTY_FOR_RENDER (~IRIS_ALL_DIRTY_FOR_COMPUTE)
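
/* Illustrative sketch (not a real driver function): state-setter hooks OR the
 * relevant bits into ice->state.dirty, and iris_upload_render_state() later
 * re-emits only the flagged packets.  For example, a hypothetical blend CSO
 * bind might do:
 *
 *    ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
 */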
130 
131 /**
132  * Per-stage dirty flags.  When state changes, we flag some combination of
133  * these to indicate that particular GPU commands need to be re-emitted.
134  * Unlike the IRIS_DIRTY_* flags, these are shader stage-specific and can be
135  * indexed by shifting the mask by the shader stage index.
136  *
137  * See iris_upload_render_state().
138  */
139 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_VS        (1ull << 0)
140 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS       (1ull << 1)
141 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_TES       (1ull << 2)
142 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_GS        (1ull << 3)
143 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_PS        (1ull << 4)
144 #define IRIS_STAGE_DIRTY_SAMPLER_STATES_CS        (1ull << 5)
145 #define IRIS_STAGE_DIRTY_UNCOMPILED_VS            (1ull << 6)
146 #define IRIS_STAGE_DIRTY_UNCOMPILED_TCS           (1ull << 7)
147 #define IRIS_STAGE_DIRTY_UNCOMPILED_TES           (1ull << 8)
148 #define IRIS_STAGE_DIRTY_UNCOMPILED_GS            (1ull << 9)
149 #define IRIS_STAGE_DIRTY_UNCOMPILED_FS            (1ull << 10)
150 #define IRIS_STAGE_DIRTY_UNCOMPILED_CS            (1ull << 11)
151 #define IRIS_STAGE_DIRTY_VS                       (1ull << 12)
152 #define IRIS_STAGE_DIRTY_TCS                      (1ull << 13)
153 #define IRIS_STAGE_DIRTY_TES                      (1ull << 14)
154 #define IRIS_STAGE_DIRTY_GS                       (1ull << 15)
155 #define IRIS_STAGE_DIRTY_FS                       (1ull << 16)
156 #define IRIS_STAGE_DIRTY_CS                       (1ull << 17)
157 #define IRIS_SHIFT_FOR_STAGE_DIRTY_CONSTANTS      18
158 #define IRIS_STAGE_DIRTY_CONSTANTS_VS             (1ull << 18)
159 #define IRIS_STAGE_DIRTY_CONSTANTS_TCS            (1ull << 19)
160 #define IRIS_STAGE_DIRTY_CONSTANTS_TES            (1ull << 20)
161 #define IRIS_STAGE_DIRTY_CONSTANTS_GS             (1ull << 21)
162 #define IRIS_STAGE_DIRTY_CONSTANTS_FS             (1ull << 22)
163 #define IRIS_STAGE_DIRTY_CONSTANTS_CS             (1ull << 23)
164 #define IRIS_SHIFT_FOR_STAGE_DIRTY_BINDINGS       24
165 #define IRIS_STAGE_DIRTY_BINDINGS_VS              (1ull << 24)
166 #define IRIS_STAGE_DIRTY_BINDINGS_TCS             (1ull << 25)
167 #define IRIS_STAGE_DIRTY_BINDINGS_TES             (1ull << 26)
168 #define IRIS_STAGE_DIRTY_BINDINGS_GS              (1ull << 27)
169 #define IRIS_STAGE_DIRTY_BINDINGS_FS              (1ull << 28)
170 #define IRIS_STAGE_DIRTY_BINDINGS_CS              (1ull << 29)
171 
172 #define IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE (IRIS_STAGE_DIRTY_CS | \
173                                           IRIS_STAGE_DIRTY_SAMPLER_STATES_CS | \
174                                           IRIS_STAGE_DIRTY_UNCOMPILED_CS |    \
175                                           IRIS_STAGE_DIRTY_CONSTANTS_CS |     \
176                                           IRIS_STAGE_DIRTY_BINDINGS_CS)
177 
178 #define IRIS_ALL_STAGE_DIRTY_FOR_RENDER (~IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE)
179 
180 #define IRIS_ALL_STAGE_DIRTY_BINDINGS_FOR_RENDER (IRIS_STAGE_DIRTY_BINDINGS_VS  | \
181                                                   IRIS_STAGE_DIRTY_BINDINGS_TCS | \
182                                                   IRIS_STAGE_DIRTY_BINDINGS_TES | \
183                                                   IRIS_STAGE_DIRTY_BINDINGS_GS  | \
184                                                   IRIS_STAGE_DIRTY_BINDINGS_FS)
185 
186 #define IRIS_ALL_STAGE_DIRTY_BINDINGS (IRIS_ALL_STAGE_DIRTY_BINDINGS_FOR_RENDER | \
187                                        IRIS_STAGE_DIRTY_BINDINGS_CS)
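
/* Illustrative sketch: because the per-stage bits are laid out consecutively
 * in shader-stage order, the bit for an arbitrary gl_shader_stage can be
 * obtained by shifting the VS bit (or by using the
 * IRIS_SHIFT_FOR_STAGE_DIRTY_* bases).  For example, flagging new constants
 * for a given stage might (hypothetically) look like:
 *
 *    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
 */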
188 
189 /**
190  * Non-orthogonal state (NOS) dependency flags.
191  *
192  * Shader programs may depend on non-orthogonal state.  These flags are
193  * used to indicate that a shader's key depends on the state provided by
194  * a certain Gallium CSO.  Changing any CSOs marked as a dependency will
195  * cause the driver to re-compute the shader key, possibly triggering a
196  * shader recompile.
197  */
198 enum iris_nos_dep {
199    IRIS_NOS_FRAMEBUFFER,
200    IRIS_NOS_DEPTH_STENCIL_ALPHA,
201    IRIS_NOS_RASTERIZER,
202    IRIS_NOS_BLEND,
203    IRIS_NOS_LAST_VUE_MAP,
204 
205    IRIS_NOS_COUNT,
206 };
207 
208 /** @{
209  *
210  * Program cache keys for state based recompiles.
211  */
212 
213 /* Provide explicit padding for each member, to ensure that the compiler
214  * initializes every bit in the shader cache keys.  The keys will be compared
215  * with memcmp.
216  */
217 PRAGMA_DIAGNOSTIC_PUSH
218 PRAGMA_DIAGNOSTIC_ERROR(-Wpadded)
219 
220 /**
221  * Note: we need to take care to have padding explicitly declared
222  * for each key, since we will directly memcmp the whole struct.
223  */
224 struct iris_base_prog_key {
225    unsigned program_string_id;
226    bool limit_trig_input_range;
227    unsigned padding:24;
228 };
229 
230 struct iris_vue_prog_key {
231    struct iris_base_prog_key base;
232 
233    unsigned nr_userclip_plane_consts:4;
234    unsigned padding:28;
235 };
236 
237 struct iris_vs_prog_key {
238    struct iris_vue_prog_key vue;
239 };
240 
241 struct iris_tcs_prog_key {
242    struct iris_vue_prog_key vue;
243 
244    enum tess_primitive_mode _tes_primitive_mode;
245 
246    uint8_t input_vertices;
247 
248    bool quads_workaround;
249    unsigned padding:16;
250 
251    /** A bitfield of per-patch outputs written. */
252    uint32_t patch_outputs_written;
253 
254    /** A bitfield of per-vertex outputs written. */
255    uint64_t outputs_written;
256 };
257 
258 struct iris_tes_prog_key {
259    struct iris_vue_prog_key vue;
260 
261    /** A bitfield of per-patch inputs read. */
262    uint32_t patch_inputs_read;
263 
264    /** A bitfield of per-vertex inputs read. */
265    uint64_t inputs_read;
266 };
267 
268 struct iris_gs_prog_key {
269    struct iris_vue_prog_key vue;
270 };
271 
272 struct iris_fs_prog_key {
273    struct iris_base_prog_key base;
274 
275    uint64_t input_slots_valid;
276    uint8_t color_outputs_valid;
277 
278    unsigned nr_color_regions:5;
279    bool flat_shade:1;
280    bool alpha_test_replicate_alpha:1;
281    bool alpha_to_coverage:1;
282    bool clamp_fragment_color:1;
283    bool persample_interp:1;
284    bool multisample_fbo:1;
285    bool force_dual_color_blend:1;
286    bool coherent_fb_fetch:1;
287    uint64_t padding:43;
288 };
289 
290 struct iris_cs_prog_key {
291    struct iris_base_prog_key base;
292 };
293 
294 union iris_any_prog_key {
295    struct iris_base_prog_key base;
296    struct iris_vue_prog_key vue;
297    struct iris_vs_prog_key vs;
298    struct iris_tcs_prog_key tcs;
299    struct iris_tes_prog_key tes;
300    struct iris_gs_prog_key gs;
301    struct iris_fs_prog_key fs;
302    struct iris_cs_prog_key cs;
303 };
304 
305 /* Restore the pack alignment to default. */
306 PRAGMA_DIAGNOSTIC_POP
307 
308 /** @} */
309 
310 struct iris_ubo_range
311 {
312    uint16_t block;
313 
314    /* In units of 32-byte registers */
315    uint8_t start;
316    uint8_t length;
317 };
318 
319 struct iris_fs_data {
320    int urb_setup[VARYING_SLOT_MAX];
321    uint8_t urb_setup_attribs[VARYING_SLOT_MAX];
322    uint8_t urb_setup_attribs_count;
323 
324    uint64_t inputs;
325    unsigned num_varying_inputs;
326 
327    unsigned msaa_flags_param;
328    uint32_t flat_inputs;
329 
330    uint8_t computed_depth_mode;
331    uint8_t max_polygons;
332    uint8_t dispatch_multi;
333 
334    bool computed_stencil;
335    bool early_fragment_tests;
336    bool post_depth_coverage;
337    bool inner_coverage;
338    bool dispatch_8;
339    bool dispatch_16;
340    bool dispatch_32;
341    bool dual_src_blend;
342    bool uses_pos_offset;
343    bool uses_omask;
344    bool uses_kill;
345    bool uses_src_depth;
346    bool uses_src_w;
347    bool uses_sample_mask;
348    bool uses_vmask;
349    bool has_side_effects;
350    bool pulls_bary;
351 
352    bool uses_nonperspective_interp_modes;
353 
354    bool is_per_sample;
355 };
356 
357 struct iris_push_const_block {
358    unsigned dwords;     /* Dword count, not reg aligned */
359    unsigned regs;
360    unsigned size;       /* Bytes, register aligned */
361 };
362 
363 struct iris_cs_data {
364    struct {
365       struct iris_push_const_block cross_thread;
366       struct iris_push_const_block per_thread;
367    } push;
368 
369    unsigned local_size[3];
370    unsigned prog_offset[3];
371    unsigned prog_mask;
372 
373    uint8_t generate_local_id;
374    enum intel_compute_walk_order walk_order;
375 
376    bool uses_barrier;
377    bool first_param_is_builtin_subgroup_id;
378 };
379 
380 static inline uint32_t
381 iris_cs_data_prog_offset(const struct iris_cs_data *prog_data,
382                          unsigned dispatch_width)
383 {
384    assert(dispatch_width == 8 ||
385           dispatch_width == 16 ||
386           dispatch_width == 32);
387    const unsigned index = dispatch_width / 16;
388    assert(prog_data->prog_mask & (1 << index));
389    return prog_data->prog_offset[index];
390 }
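
/* Worked example: dispatch_width / 16 maps SIMD8 to prog_offset[0], SIMD16 to
 * prog_offset[1], and SIMD32 to prog_offset[2].  The returned value is an
 * offset into the uploaded assembly, so the kernel start pointer for a given
 * width is (roughly, assuming the usual usage)
 *
 *    KSP(shader) + iris_cs_data_prog_offset(iris_cs_data(shader), width);
 */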
391 
392 struct iris_vue_data {
393    struct intel_vue_map            vue_map;
394    unsigned                        urb_read_length;
395    uint32_t                        cull_distance_mask;
396    unsigned                        urb_entry_size;
397    enum intel_shader_dispatch_mode dispatch_mode;
398    bool                            include_vue_handles;
399 };
400 
401 struct iris_vs_data {
402    struct iris_vue_data base;
403 
404    bool uses_vertexid;
405    bool uses_instanceid;
406    bool uses_firstvertex;
407    bool uses_baseinstance;
408    bool uses_drawid;
409 };
410 
411 struct iris_tcs_data {
412    struct iris_vue_data base;
413 
414    int  instances;
415    int  patch_count_threshold;
416    bool include_primitive_id;
417 };
418 
419 struct iris_tes_data {
420    struct iris_vue_data base;
421 
422    enum intel_tess_partitioning    partitioning;
423    enum intel_tess_output_topology output_topology;
424    enum intel_tess_domain          domain;
425    bool                            include_primitive_id;
426 };
427 
428 struct iris_gs_data {
429    struct iris_vue_data base;
430 
431    unsigned vertices_in;
432    unsigned output_vertex_size_hwords;
433    unsigned output_topology;
434    unsigned control_data_header_size_hwords;
435    unsigned control_data_format;
436    int      static_vertex_count;
437    int      invocations;
438    bool     include_primitive_id;
439 };
440 
441 struct iris_depth_stencil_alpha_state;
442 
443 /**
444  * Cache IDs for the in-memory program cache (ice->shaders.cache).
445  */
446 enum iris_program_cache_id {
447    IRIS_CACHE_VS  = MESA_SHADER_VERTEX,
448    IRIS_CACHE_TCS = MESA_SHADER_TESS_CTRL,
449    IRIS_CACHE_TES = MESA_SHADER_TESS_EVAL,
450    IRIS_CACHE_GS  = MESA_SHADER_GEOMETRY,
451    IRIS_CACHE_FS  = MESA_SHADER_FRAGMENT,
452    IRIS_CACHE_CS  = MESA_SHADER_COMPUTE,
453    IRIS_CACHE_BLORP,
454 };
455 
456 /** @{
457  *
458  * Defines for PIPE_CONTROL operations, which trigger cache flushes,
459  * synchronization, pipelined memory writes, and so on.
460  *
461  * The bits here are not the actual hardware values.  The actual fields
462  * move between various generations, so we just have flags for each
463  * potential operation, and use genxml to encode the actual packet.
464  */
465 enum pipe_control_flags
466 {
467    PIPE_CONTROL_FLUSH_LLC                       = (1 << 1),
468    PIPE_CONTROL_LRI_POST_SYNC_OP                = (1 << 2),
469    PIPE_CONTROL_STORE_DATA_INDEX                = (1 << 3),
470    PIPE_CONTROL_CS_STALL                        = (1 << 4),
471    PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET     = (1 << 5),
472    PIPE_CONTROL_SYNC_GFDT                       = (1 << 6),
473    PIPE_CONTROL_TLB_INVALIDATE                  = (1 << 7),
474    PIPE_CONTROL_MEDIA_STATE_CLEAR               = (1 << 8),
475    PIPE_CONTROL_WRITE_IMMEDIATE                 = (1 << 9),
476    PIPE_CONTROL_WRITE_DEPTH_COUNT               = (1 << 10),
477    PIPE_CONTROL_WRITE_TIMESTAMP                 = (1 << 11),
478    PIPE_CONTROL_DEPTH_STALL                     = (1 << 12),
479    PIPE_CONTROL_RENDER_TARGET_FLUSH             = (1 << 13),
480    PIPE_CONTROL_INSTRUCTION_INVALIDATE          = (1 << 14),
481    PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE        = (1 << 15),
482    PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE = (1 << 16),
483    PIPE_CONTROL_NOTIFY_ENABLE                   = (1 << 17),
484    PIPE_CONTROL_FLUSH_ENABLE                    = (1 << 18),
485    PIPE_CONTROL_DATA_CACHE_FLUSH                = (1 << 19),
486    PIPE_CONTROL_VF_CACHE_INVALIDATE             = (1 << 20),
487    PIPE_CONTROL_CONST_CACHE_INVALIDATE          = (1 << 21),
488    PIPE_CONTROL_STATE_CACHE_INVALIDATE          = (1 << 22),
489    PIPE_CONTROL_STALL_AT_SCOREBOARD             = (1 << 23),
490    PIPE_CONTROL_DEPTH_CACHE_FLUSH               = (1 << 24),
491    PIPE_CONTROL_TILE_CACHE_FLUSH                = (1 << 25), /* Not available in Gfx20+ */
492    PIPE_CONTROL_FLUSH_HDC                       = (1 << 26),
493    PIPE_CONTROL_PSS_STALL_SYNC                  = (1 << 27),
494    PIPE_CONTROL_L3_READ_ONLY_CACHE_INVALIDATE   = (1 << 28),
495    PIPE_CONTROL_UNTYPED_DATAPORT_CACHE_FLUSH    = (1 << 29),
496    PIPE_CONTROL_CCS_CACHE_FLUSH                 = (1 << 30),
497 };
498 
499 #define PIPE_CONTROL_CACHE_FLUSH_BITS \
500    (PIPE_CONTROL_DEPTH_CACHE_FLUSH |  \
501     PIPE_CONTROL_DATA_CACHE_FLUSH |   \
502     PIPE_CONTROL_TILE_CACHE_FLUSH |   \
503     PIPE_CONTROL_FLUSH_HDC | \
504     PIPE_CONTROL_UNTYPED_DATAPORT_CACHE_FLUSH |   \
505     PIPE_CONTROL_RENDER_TARGET_FLUSH)
506 
507 #define PIPE_CONTROL_CACHE_INVALIDATE_BITS  \
508    (PIPE_CONTROL_STATE_CACHE_INVALIDATE |   \
509     PIPE_CONTROL_CONST_CACHE_INVALIDATE |   \
510     PIPE_CONTROL_VF_CACHE_INVALIDATE |      \
511     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
512     PIPE_CONTROL_INSTRUCTION_INVALIDATE)
513 
514 #define PIPE_CONTROL_L3_RO_INVALIDATE_BITS       \
515    (PIPE_CONTROL_L3_READ_ONLY_CACHE_INVALIDATE | \
516     PIPE_CONTROL_CONST_CACHE_INVALIDATE)
517 
518 #define PIPE_CONTROL_GRAPHICS_BITS \
519    (PIPE_CONTROL_RENDER_TARGET_FLUSH |          \
520     PIPE_CONTROL_DEPTH_CACHE_FLUSH |            \
521     PIPE_CONTROL_TILE_CACHE_FLUSH |             \
522     PIPE_CONTROL_DEPTH_STALL |                  \
523     PIPE_CONTROL_STALL_AT_SCOREBOARD |          \
524     PIPE_CONTROL_PSS_STALL_SYNC |               \
525     PIPE_CONTROL_VF_CACHE_INVALIDATE |          \
526     PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET |  \
527     PIPE_CONTROL_L3_READ_ONLY_CACHE_INVALIDATE |\
528     PIPE_CONTROL_WRITE_DEPTH_COUNT)
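
/* Illustrative sketch: callers combine these flags and hand them to the
 * PIPE_CONTROL helpers declared later in this header.  For example, a
 * hypothetical flush of render-target writes before sampling might be:
 *
 *    iris_emit_pipe_control_flush(batch, "example: RT flush before sampling",
 *                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                 PIPE_CONTROL_CS_STALL);
 */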
529 
530 enum iris_predicate_state {
531    /* The first two states are used if we can determine whether to draw
532     * without having to look at the values in the query object buffer. This
533     * will happen if there is no conditional render in progress, if the query
534     * object is already completed or if something else has already added
535     * samples to the preliminary result.
536     */
537    IRIS_PREDICATE_STATE_RENDER,
538    IRIS_PREDICATE_STATE_DONT_RENDER,
539 
540    /* In this case whether to draw or not depends on the result of an
541     * MI_PREDICATE command so the predicate enable bit needs to be checked.
542     */
543    IRIS_PREDICATE_STATE_USE_BIT,
544 };
545 
546 /** @} */
547 
548 /**
549  * An uncompiled, API-facing shader.  This is the Gallium CSO for shaders.
550  * It primarily contains the NIR for the shader.
551  *
552  * Each API-facing shader can be compiled into multiple shader variants,
553  * based on non-orthogonal state dependencies, recorded in the shader key.
554  *
555  * See iris_compiled_shader, which represents a compiled shader variant.
556  */
557 struct iris_uncompiled_shader {
558    struct pipe_reference ref;
559 
560    /**
561     * NIR for the shader.
562     *
563     * Even for shaders that originate as TGSI, this pointer will be non-NULL.
564     */
565    struct nir_shader *nir;
566 
567    struct pipe_stream_output_info stream_output;
568 
569    /* A SHA1 of the serialized NIR for the disk cache. */
570    unsigned char nir_sha1[20];
571 
572    /* Hash value based on shader source program */
573    unsigned source_hash;
574 
575    unsigned program_id;
576 
577    /** Bitfield of (1 << IRIS_NOS_*) flags. */
578    unsigned nos;
579 
580    /** Have any shader variants been compiled yet? */
581    bool compiled_once;
582 
583    /* Whether shader uses atomic operations. */
584    bool uses_atomic_load_store;
585 
586    /** Size (in bytes) of the kernel input data */
587    unsigned kernel_input_size;
588 
589    /** Size (in bytes) of the local (shared) data passed as kernel inputs */
590    unsigned kernel_shared_size;
591 
592    /** List of iris_compiled_shader variants */
593    struct list_head variants;
594 
595    /** Lock for the variants list */
596    simple_mtx_t lock;
597 
598    /** For parallel shader compiles */
599    struct util_queue_fence ready;
600 };
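
/* Illustrative sketch: the `nos` bitfield is built from the IRIS_NOS_* enum.
 * A shader whose key depends on rasterizer and framebuffer state would
 * (hypothetically) record:
 *
 *    ish->nos |= (1 << IRIS_NOS_RASTERIZER) | (1 << IRIS_NOS_FRAMEBUFFER);
 *
 * so that binding a new rasterizer or framebuffer CSO re-evaluates its key.
 */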
601 
602 enum iris_surface_group {
603    IRIS_SURFACE_GROUP_RENDER_TARGET,
604    IRIS_SURFACE_GROUP_RENDER_TARGET_READ,
605    IRIS_SURFACE_GROUP_CS_WORK_GROUPS,
606    IRIS_SURFACE_GROUP_TEXTURE_LOW64,
607    IRIS_SURFACE_GROUP_TEXTURE_HIGH64,
608    IRIS_SURFACE_GROUP_IMAGE,
609    IRIS_SURFACE_GROUP_UBO,
610    IRIS_SURFACE_GROUP_SSBO,
611 
612    IRIS_SURFACE_GROUP_COUNT,
613 };
614 
615 enum {
616    /* Invalid value for a binding table index. */
617    IRIS_SURFACE_NOT_USED = 0xa0a0a0a0,
618 };
619 
620 struct iris_binding_table {
621    uint32_t size_bytes;
622 
623    /** Number of surfaces in each group, before compacting. */
624    uint32_t sizes[IRIS_SURFACE_GROUP_COUNT];
625 
626    /** Initial offset of each group. */
627    uint32_t offsets[IRIS_SURFACE_GROUP_COUNT];
628 
629    /** Mask of surfaces used in each group. */
630    uint64_t used_mask[IRIS_SURFACE_GROUP_COUNT];
631 
632    uint64_t samplers_used_mask;
633 };
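
/* Illustrative sketch (behavior summarized, not guaranteed): if
 * sizes[IRIS_SURFACE_GROUP_UBO] is 4 but only UBOs 0 and 2 are referenced
 * (used_mask[IRIS_SURFACE_GROUP_UBO] == 0x5), the compacted table only needs
 * two slots for the group.  iris_group_index_to_bti() (declared below)
 * translates a group-relative index into the final binding table index, and
 * unused entries are reported as IRIS_SURFACE_NOT_USED.
 */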
634 
635 /**
636  * A compiled shader variant, containing a pointer to the GPU assembly,
637  * as well as program data and other packets needed by state upload.
638  *
639  * There can be several iris_compiled_shader variants per API-level shader
640  * (iris_uncompiled_shader), due to state-based recompiles (brw_*_prog_key).
641  */
642 struct iris_compiled_shader {
643    struct pipe_reference ref;
644 
645    /** Link in the iris_uncompiled_shader::variants list */
646    struct list_head link;
647 
648    /** Key for this variant (but not for BLORP programs) */
649    union iris_any_prog_key key;
650 
651    /**
652     * Is the variant fully compiled and ready?
653     *
654     * Variants are added to \c iris_uncompiled_shader::variants before
655     * compilation actually occurs.  This signals that compilation has
656     * completed.
657     */
658    struct util_queue_fence ready;
659 
660    /** Variant is ready, but compilation failed. */
661    bool compilation_failed;
662 
663    /** Reference to the uploaded assembly. */
664    struct iris_state_ref assembly;
665 
666    /** Pointer to the assembly in the BO's map. */
667    void *map;
668 
669    /** The program data (owned by the program cache hash table) */
670    struct brw_stage_prog_data *brw_prog_data;
671    struct elk_stage_prog_data *elk_prog_data;
672 
673    /** A list of system values to be uploaded as uniforms. */
674    uint32_t *system_values;
675    unsigned num_system_values;
676 
677    /** Size (in bytes) of the kernel input data */
678    unsigned kernel_input_size;
679 
680    /** Number of constbufs expected by the shader. */
681    unsigned num_cbufs;
682 
683    /**
684     * Derived 3DSTATE_STREAMOUT and 3DSTATE_SO_DECL_LIST packets
685     * (the VUE-based information for transform feedback outputs).
686     */
687    uint32_t *streamout;
688 
689    struct iris_binding_table bt;
690 
691    gl_shader_stage stage;
692 
693    /**
694     * Data derived from prog_data.
695     */
696    struct iris_ubo_range ubo_ranges[4];
697 
698    unsigned nr_params;
699    unsigned total_scratch;
700    unsigned total_shared;
701    unsigned program_size;
702    unsigned const_data_offset;
703    unsigned dispatch_grf_start_reg;
704    bool     has_ubo_pull;
705    bool     use_alt_mode;
706 
707    union {
708       struct iris_fs_data  fs;
709       struct iris_cs_data  cs;
710       struct iris_vs_data  vs;
711       struct iris_tcs_data tcs;
712       struct iris_tes_data tes;
713       struct iris_gs_data  gs;
714    };
715 
716    /**
717     * Shader packets and other data derived from prog_data.  These must be
718     * completely determined from prog_data.
719     */
720    uint8_t derived_data[0];
721 };
722 
723 static inline uint64_t
724 KSP(const struct iris_compiled_shader *shader)
725 {
726    struct iris_resource *res = (void *) shader->assembly.res;
727    return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
728 }
729 
730 #define DEFINE_IRIS_SHADER_DATA(TYPE, STAGE, FIELD)                      \
731 static inline TYPE *                                                     \
732 iris_ ## FIELD ## _data(struct iris_compiled_shader *shader)             \
733 {                                                                        \
734    assert(shader->stage == STAGE);                                       \
735    return &shader->FIELD;                                                \
736 }                                                                        \
737 static inline const TYPE *                                               \
738 iris_ ## FIELD ## _data_const(const struct iris_compiled_shader *shader) \
739 {                                                                        \
740    assert(shader->stage == STAGE);                                       \
741    return &shader->FIELD;                                                \
742 }
743 
744 DEFINE_IRIS_SHADER_DATA(struct iris_fs_data,  MESA_SHADER_FRAGMENT,  fs)
745 DEFINE_IRIS_SHADER_DATA(struct iris_cs_data,  MESA_SHADER_COMPUTE,   cs)
746 DEFINE_IRIS_SHADER_DATA(struct iris_vs_data,  MESA_SHADER_VERTEX,    vs)
747 DEFINE_IRIS_SHADER_DATA(struct iris_tcs_data, MESA_SHADER_TESS_CTRL, tcs)
748 DEFINE_IRIS_SHADER_DATA(struct iris_tes_data, MESA_SHADER_TESS_EVAL, tes)
749 DEFINE_IRIS_SHADER_DATA(struct iris_gs_data,  MESA_SHADER_GEOMETRY,  gs)
750 
751 #undef DEFINE_IRIS_SHADER_DATA
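
/* Illustrative usage of the generated accessors: they simply assert the stage
 * and return the matching union member, e.g. for a fragment shader variant:
 *
 *    const struct iris_fs_data *fs_data = iris_fs_data_const(shader);
 *    if (fs_data->dual_src_blend) { ... }
 */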
752 
753 static inline struct iris_vue_data *
754 iris_vue_data(struct iris_compiled_shader *shader)
755 {
756    switch (shader->stage) {
757    case MESA_SHADER_VERTEX:    return &shader->vs.base;
758    case MESA_SHADER_TESS_CTRL: return &shader->tcs.base;
759    case MESA_SHADER_TESS_EVAL: return &shader->tes.base;
760    case MESA_SHADER_GEOMETRY:  return &shader->gs.base;
761    default:
762       unreachable("invalid shader stage for vue prog data");
763       return NULL;
764    }
765 }
766 
767 /**
768  * API context state that is replicated per shader stage.
769  */
770 struct iris_shader_state {
771    /** Uniform Buffers */
772    struct pipe_shader_buffer constbuf[PIPE_MAX_CONSTANT_BUFFERS];
773    struct iris_state_ref constbuf_surf_state[PIPE_MAX_CONSTANT_BUFFERS];
774 
775    bool sysvals_need_upload;
776 
777    /** Shader Storage Buffers */
778    struct pipe_shader_buffer ssbo[PIPE_MAX_SHADER_BUFFERS];
779    struct iris_state_ref ssbo_surf_state[PIPE_MAX_SHADER_BUFFERS];
780 
781    /** Shader Storage Images (image load store) */
782    struct iris_image_view image[PIPE_MAX_SHADER_IMAGES];
783 
784    struct iris_state_ref sampler_table;
785    struct iris_sampler_state *samplers[IRIS_MAX_SAMPLERS];
786    struct iris_sampler_view *textures[IRIS_MAX_TEXTURES];
787 
788    /** Bitfield of which constant buffers are bound (non-null). */
789    uint32_t bound_cbufs;
790    uint32_t dirty_cbufs;
791 
792    /** Bitfield of which image views are bound (non-null). */
793    uint64_t bound_image_views;
794 
795    /** Bitfield of which sampler views are bound (non-null). */
796    BITSET_DECLARE(bound_sampler_views, IRIS_MAX_TEXTURES);
797 
798    /** Bitfield of which shader storage buffers are bound (non-null). */
799    uint32_t bound_ssbos;
800 
801    /** Bitfield of which shader storage buffers are writable. */
802    uint32_t writable_ssbos;
803 
804    /** Array of aux usages used for our shader's images in the current draw */
805    enum isl_aux_usage image_aux_usage[PIPE_MAX_SHADER_IMAGES];
806 };
807 
808 /**
809  * Gallium CSO for stream output (transform feedback) targets.
810  */
811 struct iris_stream_output_target {
812    struct pipe_stream_output_target base;
813 
814    /** Storage holding the offset where we're writing in the buffer */
815    struct iris_state_ref offset;
816 
817    /** Stride (bytes-per-vertex) during this transform feedback operation */
818    uint16_t stride;
819 
820    /** Does the next 3DSTATE_SO_BUFFER need to zero the offsets? */
821    bool zero_offset;
822 };
823 
824 enum iris_context_priority {
825    IRIS_CONTEXT_MEDIUM_PRIORITY = 0,
826    IRIS_CONTEXT_LOW_PRIORITY,
827    IRIS_CONTEXT_HIGH_PRIORITY
828 };
829 
830 /**
831  * The API context (derived from pipe_context).
832  *
833  * Most driver state is tracked here.
834  */
835 struct iris_context {
836    struct pipe_context ctx;
837    struct threaded_context *thrctx;
838 
839    /** A debug callback for KHR_debug output. */
840    struct util_debug_callback dbg;
841 
842    /** Whether the context is protected (through EGL_EXT_protected_content) */
843    bool protected;
844 
845    /** Whether a banned context was already signalled */
846    bool context_reset_signaled;
847 
848    /** A device reset status callback for notifying that the GPU is hosed. */
849    struct pipe_device_reset_callback reset;
850 
851    /** A set of dmabuf resources dirtied beyond their default aux-states. */
852    struct set *dirty_dmabufs;
853 
854    /** Slab allocator for iris_transfer_map objects. */
855    struct slab_child_pool transfer_pool;
856 
857    /** Slab allocator for threaded_context's iris_transfer_map objects */
858    struct slab_child_pool transfer_pool_unsync;
859 
860    struct blorp_context blorp;
861 
862    struct iris_batch batches[IRIS_BATCH_COUNT];
863    enum iris_context_priority priority;
864    bool has_engines_context; /* i915 specific */
865 
866    struct u_upload_mgr *query_buffer_uploader;
867 
868    struct intel_ds_device ds;
869 
870    struct {
871       struct {
872          /**
873           * Either the value of BaseVertex for indexed draw calls or the value
874           * of the argument <first> for non-indexed draw calls.
875           */
876          int firstvertex;
877          int baseinstance;
878       } params;
879 
880       /**
881        * Are the above values the ones stored in the draw_params buffer?
882        * If so, we can compare them against new values to see if anything
883        * changed.  If not, we need to assume they changed.
884        */
885       bool params_valid;
886 
887       /**
888        * Resource and offset pointing at the draw parameters in the indirect
889        * buffer, or at the buffer that stores the previous values for
890        * non-indirect draws.
891        */
892       struct iris_state_ref draw_params;
893 
894       struct {
895          /**
896           * The value of DrawID. This always comes in from its own vertex
897           * buffer since it's not part of the indirect draw parameters.
898           */
899          int drawid;
900 
901          /**
902           * Stores whether this is an indexed or non-indexed draw (~0/0). Useful
903           * for computing BaseVertex as an AND of firstvertex and is_indexed_draw.
904           */
905          int is_indexed_draw;
906       } derived_params;
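
      /* Worked example of the AND trick above: for an indexed draw,
       * is_indexed_draw is ~0, so (firstvertex & is_indexed_draw) yields
       * firstvertex (BaseVertex); for a non-indexed draw it is 0, so the
       * result collapses to 0, as required.
       */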
907 
908       /**
909        * Resource and offset used for GL_ARB_shader_draw_parameters; it holds
910        * parameters that are not present in the indirect buffer, such as
911        * drawid and is_indexed_draw. They go in their own vertex element.
912        */
913       struct iris_state_ref derived_draw_params;
914 
915       struct {
916          /**
917           * Generation fragment shader
918           */
919          struct iris_compiled_shader *shader;
920 
921          /**
922           * Ring buffer where to generate indirect draw commands
923           */
924          struct iris_bo *ring_bo;
925 
926          /**
927           * Allocated iris_gen_indirect_params
928           */
929          struct iris_state_ref params;
930 
931          /**
932           * Vertices used to dispatch the generated fragment shaders
933           */
934          struct iris_state_ref vertices;
935       } generation;
936    } draw;
937 
938    struct {
939       struct iris_uncompiled_shader *uncompiled[MESA_SHADER_STAGES];
940       struct iris_compiled_shader *prog[MESA_SHADER_STAGES];
941       struct iris_compiled_shader *last_vue_shader;
942       struct {
943          struct intel_urb_config cfg;
944          bool constrained;
945       } urb;
946 
947       /** Last urb emitted by the driver. */
948       struct intel_urb_config last_urb;
949 
950       /** Uploader for shader assembly from the driver thread */
951       struct u_upload_mgr *uploader_driver;
952       /** Uploader for shader assembly from the threaded context */
953       struct u_upload_mgr *uploader_unsync;
954       struct hash_table *cache;
955 
956       /** Is a GS or TES outputting points or lines? */
957       bool output_topology_is_points_or_lines;
958 
959       /**
960        * Scratch buffers for various sizes and stages.
961        *
962        * Indexed by the "Per-Thread Scratch Space" field's 4-bit encoding,
963        * and shader stage.
964        */
965       struct iris_bo *scratch_bos[1 << 4][MESA_SHADER_STAGES];
966 
967       /**
968        * Scratch buffer surface states on Gfx12.5+
969        */
970       struct iris_state_ref scratch_surfs[1 << 4];
971    } shaders;
972 
973    struct intel_perf_context *perf_ctx;
974 
975    /** Frame number for u_trace */
976    struct {
977       uint32_t begin_frame;
978       uint32_t end_frame;
979       uint64_t last_full_timestamp;
980       void    *last_compute_walker;
981    } utrace;
982 
983    /** Frame number for debug prints */
984    uint32_t frame;
985 
986    /** Track draw call count for adding GPU breakpoint on 3DPRIMITIVE */
987    uint32_t draw_call_count;
988 
989    struct {
990       uint64_t dirty;
991       uint64_t stage_dirty;
992       uint64_t stage_dirty_for_nos[IRIS_NOS_COUNT];
993 
994       unsigned num_viewports;
995       unsigned sample_mask;
996       struct iris_blend_state *cso_blend;
997       struct iris_rasterizer_state *cso_rast;
998       struct iris_depth_stencil_alpha_state *cso_zsa;
999       struct iris_vertex_element_state *cso_vertex_elements;
1000       struct pipe_blend_color blend_color;
1001       struct pipe_poly_stipple poly_stipple;
1002       struct pipe_viewport_state viewports[IRIS_MAX_VIEWPORTS];
1003       struct pipe_scissor_state scissors[IRIS_MAX_VIEWPORTS];
1004       struct pipe_stencil_ref stencil_ref;
1005       struct pipe_framebuffer_state framebuffer;
1006       struct pipe_clip_state clip_planes;
1007 
1008       float default_outer_level[4];
1009       float default_inner_level[2];
1010 
1011       /** Bitfield of which vertex buffers are bound (non-null). */
1012       uint64_t bound_vertex_buffers;
1013 
1014       uint8_t patch_vertices;
1015       bool primitive_restart;
1016       unsigned cut_index;
1017       enum mesa_prim prim_mode:8;
1018       bool prim_is_points_or_lines;
1019       uint8_t vertices_per_patch;
1020 
1021       bool window_space_position;
1022 
1023       /** The last compute group size */
1024       uint32_t last_block[3];
1025 
1026       /** The last compute grid size */
1027       uint32_t last_grid[3];
1028       /** The last compute grid dimensions */
1029       uint32_t last_grid_dim;
1030       /** Reference to the BO containing the compute grid size */
1031       struct iris_state_ref grid_size;
1032       /** Reference to the SURFACE_STATE for the compute grid resource */
1033       struct iris_state_ref grid_surf_state;
1034 
1035       /**
1036        * Array of aux usages for drawing, altered to account for any
1037        * self-dependencies from resources bound for sampling and rendering.
1038        */
1039       enum isl_aux_usage draw_aux_usage[IRIS_MAX_DRAW_BUFFERS];
1040 
1041       /** Aux usage of the fb's depth buffer (which may or may not exist). */
1042       enum isl_aux_usage hiz_usage;
1043 
1044       enum intel_urb_deref_block_size urb_deref_block_size;
1045 
1046       /** Are depth writes enabled?  (Depth buffer may or may not exist.) */
1047       bool depth_writes_enabled;
1048 
1049       /** Are stencil writes enabled?  (Stencil buffer may or may not exist.) */
1050       bool stencil_writes_enabled;
1051 
1052       /** Current/upcoming ds_write_state for Wa_18019816803. */
1053       bool ds_write_state;
1054 
1055       /** State tracking for Wa_14018912822. */
1056       bool color_blend_zero;
1057       bool alpha_blend_zero;
1058 
1059       /** State tracking for Wa_18020335297. */
1060       bool viewport_ptr_set;
1061 
1062       /** Do we have integer RT in current framebuffer state? */
1063       bool has_integer_rt;
1064 
1065       /** GenX-specific current state */
1066       struct iris_genx_state *genx;
1067 
1068       struct iris_shader_state shaders[MESA_SHADER_STAGES];
1069 
1070       /** Does the vertex shader use shader draw parameters? */
1071       bool vs_uses_draw_params;
1072       bool vs_uses_derived_draw_params;
1073       bool vs_needs_sgvs_element;
1074 
1075       /** Does the vertex shader use the edge flag? */
1076       bool vs_needs_edge_flag;
1077 
1078       /** Do any samplers need border color?  One bit per shader stage. */
1079       uint8_t need_border_colors;
1080 
1081       /** Global resource bindings */
1082       struct pipe_resource *global_bindings[IRIS_MAX_GLOBAL_BINDINGS];
1083 
1084       struct pipe_stream_output_target *so_target[PIPE_MAX_SO_BUFFERS];
1085       bool streamout_active;
1086 
1087       bool statistics_counters_enabled;
1088 
1089       /** Current conditional rendering mode */
1090       enum iris_predicate_state predicate;
1091 
1092       /**
1093        * Query BO with a MI_PREDICATE_RESULT snapshot calculated on the
1094        * render context that needs to be uploaded to the compute context.
1095        */
1096       struct iris_bo *compute_predicate;
1097 
1098       /** Is a PIPE_QUERY_PRIMITIVES_GENERATED query active? */
1099       bool prims_generated_query_active;
1100 
1101       /** Is a PIPE_QUERY_OCCLUSION_COUNTER query active? */
1102       bool occlusion_query_active;
1103 
1104       /** 3DSTATE_STREAMOUT and 3DSTATE_SO_DECL_LIST packets */
1105       uint32_t *streamout;
1106 
1107       /** The SURFACE_STATE for a 1x1x1 null surface. */
1108       struct iris_state_ref unbound_tex;
1109 
1110       /** The SURFACE_STATE for a framebuffer-sized null surface. */
1111       struct iris_state_ref null_fb;
1112 
1113       struct u_upload_mgr *surface_uploader;
1114       struct u_upload_mgr *scratch_surface_uploader;
1115       struct u_upload_mgr *dynamic_uploader;
1116 
1117       struct iris_binder binder;
1118 
1119       /** The high 16-bits of the last VBO/index buffer addresses */
1120       uint16_t last_vbo_high_bits[33];
1121       uint16_t last_index_bo_high_bits;
1122 
1123       /**
1124        * Resources containing streamed state which our render context
1125        * currently points to.  Used to re-add these to the validation
1126        * list when we start a new batch and haven't resubmitted commands.
1127        */
1128       struct {
1129          struct pipe_resource *cc_vp;
1130          struct pipe_resource *sf_cl_vp;
1131          struct pipe_resource *color_calc;
1132          struct pipe_resource *scissor;
1133          struct pipe_resource *blend;
1134          struct pipe_resource *index_buffer;
1135          struct pipe_resource *cs_thread_ids;
1136          struct pipe_resource *cs_desc;
1137       } last_res;
1138 
1139       /** Records the size of variable-length state for INTEL_DEBUG=bat */
1140       struct hash_table_u64 *sizes;
1141 
1142       /** Last rendering scale argument provided to genX(emit_hashing_mode). */
1143       unsigned current_hash_scale;
1144 
1145       /** Resource holding the pixel pipe hashing tables. */
1146       struct pipe_resource *pixel_hashing_tables;
1147 
1148       bool use_tbimr;
1149    } state;
1150 };
1151 
1152 /**
1153  * Push constant data handed over to the indirect draw generation shader
1154  */
1155 struct iris_gen_indirect_params {
1156    /**
1157     * Address of iris_context:draw:generation:ring_bo
1158     */
1159    uint64_t generated_cmds_addr;
1160    /**
1161     * Address of indirect data to draw with
1162     */
1163    uint64_t indirect_data_addr;
1164    /**
1165     * Address inside iris_context:draw:generation:ring_bo where to draw ids
1166     */
1167    uint64_t draw_id_addr;
1168    /**
1169     * Address of the indirect count (can be null, in which case max_draw_count
1170     * is used)
1171     */
1172    uint64_t draw_count_addr;
1173    /**
1174     * Address to jump to in order to generate more draws
1175     */
1176    uint64_t gen_addr;
1177    /**
1178     * Address to jump to to end generated draws
1179     */
1180    uint64_t end_addr;
1181    /**
1182     * Stride between the indirect draw data
1183     */
1184    uint32_t indirect_data_stride;
1185    /**
1186     * Base index of the current generated draws in the ring buffer (increments
1187     * by ring_count)
1188     */
1189    uint32_t draw_base;
1190    /**
1191    * Maximum number of generated draws if draw_count_addr is null
1192     */
1193    uint32_t max_draw_count;
1194    /**
1195     * bits 0-7:   ANV_GENERATED_FLAG_*
1196     * bits 8-15:  vertex buffer mocs
1197     * bits 16-23: stride between generated commands
1198     */
1199    uint32_t flags;
1200    /**
1201     * Number of items to generate in the ring buffer
1202     */
1203    uint32_t ring_count;
1204 };
1205 
1206 #define perf_debug(dbg, ...) do {                      \
1207    if (INTEL_DEBUG(DEBUG_PERF))                        \
1208       dbg_printf(__VA_ARGS__);                         \
1209    if (unlikely(dbg))                                  \
1210       util_debug_message(dbg, PERF_INFO, __VA_ARGS__); \
1211 } while(0)
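
/* Illustrative usage: the first argument is the context's debug callback, so
 * a typical (hypothetical) call site looks like
 *
 *    perf_debug(&ice->dbg, "Example: stalling on %s\n", reason);
 *
 * which prints when DEBUG_PERF is enabled and also forwards the message to
 * any KHR_debug callback installed by the application.
 */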
1212 
1213 struct pipe_context *
1214 iris_create_context(struct pipe_screen *screen, void *priv, unsigned flags);
1215 void iris_destroy_context(struct pipe_context *ctx);
1216 
1217 void iris_lost_context_state(struct iris_batch *batch);
1218 
1219 void iris_mark_dirty_dmabuf(struct iris_context *ice,
1220                             struct pipe_resource *res);
1221 void iris_flush_dirty_dmabufs(struct iris_context *ice);
1222 
1223 void iris_init_blit_functions(struct pipe_context *ctx);
1224 void iris_init_clear_functions(struct pipe_context *ctx);
1225 void iris_init_program_functions(struct pipe_context *ctx);
1226 void iris_init_screen_program_functions(struct pipe_screen *pscreen);
1227 void iris_init_resource_functions(struct pipe_context *ctx);
1228 void iris_init_perfquery_functions(struct pipe_context *ctx);
1229 void iris_update_compiled_shaders(struct iris_context *ice);
1230 void iris_update_compiled_compute_shader(struct iris_context *ice);
1231 void iris_fill_cs_push_const_buffer(struct iris_screen *screen,
1232                                     struct iris_compiled_shader *shader,
1233                                     unsigned threads,
1234                                     uint32_t *dst);
1235 
1236 
1237 /* iris_blit.c */
1238 #define IRIS_BLORP_RELOC_FLAGS_EXEC_OBJECT_WRITE      (1 << 2)
1239 
1240 void iris_blorp_surf_for_resource(struct isl_device *isl_dev,
1241                                   struct blorp_surf *surf,
1242                                   struct pipe_resource *p_res,
1243                                   enum isl_aux_usage aux_usage,
1244                                   unsigned level,
1245                                   bool is_render_target);
1246 void iris_copy_region(struct blorp_context *blorp,
1247                       struct iris_batch *batch,
1248                       struct pipe_resource *dst,
1249                       unsigned dst_level,
1250                       unsigned dstx, unsigned dsty, unsigned dstz,
1251                       struct pipe_resource *src,
1252                       unsigned src_level,
1253                       const struct pipe_box *src_box);
1254 
1255 static inline enum blorp_batch_flags
1256 iris_blorp_flags_for_batch(struct iris_batch *batch)
1257 {
1258    if (batch->name == IRIS_BATCH_COMPUTE)
1259       return BLORP_BATCH_USE_COMPUTE;
1260 
1261    if (batch->name == IRIS_BATCH_BLITTER)
1262       return BLORP_BATCH_USE_BLITTER;
1263 
1264    return 0;
1265 }
1266 
1267 /* iris_draw.c */
1268 
1269 void iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info,
1270                    unsigned drawid_offset,
1271                    const struct pipe_draw_indirect_info *indirect,
1272                    const struct pipe_draw_start_count_bias *draws,
1273                    unsigned num_draws);
1274 void iris_launch_grid(struct pipe_context *, const struct pipe_grid_info *);
1275 
1276 /* iris_pipe_control.c */
1277 
1278 void iris_emit_pipe_control_flush(struct iris_batch *batch,
1279                                   const char *reason, uint32_t flags);
1280 void iris_emit_pipe_control_write(struct iris_batch *batch,
1281                                   const char *reason, uint32_t flags,
1282                                   struct iris_bo *bo, uint32_t offset,
1283                                   uint64_t imm);
1284 void iris_emit_end_of_pipe_sync(struct iris_batch *batch,
1285                                 const char *reason, uint32_t flags);
1286 void iris_emit_buffer_barrier_for(struct iris_batch *batch,
1287                                   struct iris_bo *bo,
1288                                   enum iris_domain access);
1289 void iris_flush_all_caches(struct iris_batch *batch);
1290 
1291 #define iris_handle_always_flush_cache(batch) \
1292    if (unlikely(batch->screen->driconf.always_flush_cache)) \
1293       iris_flush_all_caches(batch);
1294 
1295 void iris_init_flush_functions(struct pipe_context *ctx);
1296 
1297 /* iris_program.c */
1298 void iris_compiler_init(struct iris_screen *screen);
1299 void iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
1300                                      struct pipe_shader_buffer *buf,
1301                                      struct iris_state_ref *surf_state,
1302                                      isl_surf_usage_flags_t usage);
1303 const struct shader_info *iris_get_shader_info(const struct iris_context *ice,
1304                                                gl_shader_stage stage);
1305 struct iris_bo *iris_get_scratch_space(struct iris_context *ice,
1306                                        unsigned per_thread_scratch,
1307                                        gl_shader_stage stage);
1308 const struct iris_state_ref *iris_get_scratch_surf(struct iris_context *ice,
1309                                                    unsigned per_thread_scratch);
1310 uint32_t iris_group_index_to_bti(const struct iris_binding_table *bt,
1311                                  enum iris_surface_group group,
1312                                  uint32_t index);
1313 uint32_t iris_bti_to_group_index(const struct iris_binding_table *bt,
1314                                  enum iris_surface_group group,
1315                                  uint32_t bti);
1316 void iris_apply_brw_prog_data(struct iris_compiled_shader *shader,
1317                               struct brw_stage_prog_data *prog_data);
1318 void iris_apply_elk_prog_data(struct iris_compiled_shader *shader,
1319                               struct elk_stage_prog_data *prog_data);
1320 struct intel_cs_dispatch_info
1321 iris_get_cs_dispatch_info(const struct intel_device_info *devinfo,
1322                           const struct iris_compiled_shader *shader,
1323                           const uint32_t block[3]);
1324 unsigned
1325 iris_cs_push_const_total_size(const struct iris_compiled_shader *shader,
1326                               unsigned threads);
1327 uint32_t
1328 iris_fs_barycentric_modes(const struct iris_compiled_shader *shader,
1329                           enum intel_msaa_flags pushed_msaa_flags);
1330 bool iris_use_tcs_multi_patch(struct iris_screen *screen);
1331 bool iris_indirect_ubos_use_sampler(struct iris_screen *screen);
1332 const void *iris_get_compiler_options(struct pipe_screen *pscreen,
1333                                       enum pipe_shader_ir ir,
1334                                       enum pipe_shader_type pstage);
1335 
1336 /* iris_disk_cache.c */
1337 
1338 void iris_disk_cache_store(struct disk_cache *cache,
1339                            const struct iris_uncompiled_shader *ish,
1340                            const struct iris_compiled_shader *shader,
1341                            const void *prog_key,
1342                            uint32_t prog_key_size);
1343 bool
1344 iris_disk_cache_retrieve(struct iris_screen *screen,
1345                          struct u_upload_mgr *uploader,
1346                          struct iris_uncompiled_shader *ish,
1347                          struct iris_compiled_shader *shader,
1348                          const void *prog_key,
1349                          uint32_t prog_key_size);
1350 
1351 /* iris_program_cache.c */
1352 
1353 void iris_init_program_cache(struct iris_context *ice);
1354 void iris_destroy_program_cache(struct iris_context *ice);
1355 struct iris_compiled_shader *iris_find_cached_shader(struct iris_context *ice,
1356                                                      enum iris_program_cache_id,
1357                                                      uint32_t key_size,
1358                                                      const void *key);
1359 
1360 struct iris_compiled_shader *iris_create_shader_variant(const struct iris_screen *,
1361                                                         void *mem_ctx,
1362                                                         gl_shader_stage stage,
1363                                                         enum iris_program_cache_id cache_id,
1364                                                         uint32_t key_size,
1365                                                         const void *key);
1366 
1367 void iris_finalize_program(struct iris_compiled_shader *shader,
1368                            uint32_t *streamout,
1369                            uint32_t *system_values,
1370                            unsigned num_system_values,
1371                            unsigned kernel_input_size,
1372                            unsigned num_cbufs,
1373                            const struct iris_binding_table *bt);
1374 
1375 void iris_upload_shader(struct iris_screen *screen,
1376                         struct iris_uncompiled_shader *,
1377                         struct iris_compiled_shader *,
1378                         struct hash_table *driver_ht,
1379                         struct u_upload_mgr *uploader,
1380                         enum iris_program_cache_id,
1381                         uint32_t key_size,
1382                         const void *key,
1383                         const void *assembly);
1384 void iris_delete_shader_variant(struct iris_compiled_shader *shader);
1385 
1386 void iris_destroy_shader_state(struct pipe_context *ctx, void *state);
1387 
1388 static inline void
1389 iris_uncompiled_shader_reference(struct pipe_context *ctx,
1390                                  struct iris_uncompiled_shader **dst,
1391                                  struct iris_uncompiled_shader *src)
1392 {
1393    if (*dst == src)
1394       return;
1395 
1396    struct iris_uncompiled_shader *old_dst = *dst;
1397 
1398    if (pipe_reference(old_dst != NULL ? &old_dst->ref : NULL,
1399                       src != NULL ? &src->ref : NULL)) {
1400       iris_destroy_shader_state(ctx, *dst);
1401    }
1402 
1403    *dst = src;
1404 }
1405 
1406 static inline void
1407 iris_shader_variant_reference(struct iris_compiled_shader **dst,
1408                               struct iris_compiled_shader *src)
1409 {
1410    struct iris_compiled_shader *old_dst = *dst;
1411 
1412    if (pipe_reference(old_dst ? &old_dst->ref : NULL, src ? &src->ref : NULL))
1413       iris_delete_shader_variant(old_dst);
1414 
1415    *dst = src;
1416 }
1417 
1418 bool iris_blorp_lookup_shader(struct blorp_batch *blorp_batch,
1419                               const void *key,
1420                               uint32_t key_size,
1421                               uint32_t *kernel_out,
1422                               void *prog_data_out);
1423 bool iris_blorp_upload_shader(struct blorp_batch *blorp_batch, uint32_t stage,
1424                               const void *key, uint32_t key_size,
1425                               const void *kernel, uint32_t kernel_size,
1426                               const void *prog_data,
1427                               uint32_t prog_data_size,
1428                               uint32_t *kernel_out,
1429                               void *prog_data_out);
1430 
1431 void iris_ensure_indirect_generation_shader(struct iris_batch *batch);
1432 
1433 
1434 /* iris_resolve.c */
1435 
1436 void iris_predraw_resolve_inputs(struct iris_context *ice,
1437                                  struct iris_batch *batch,
1438                                  bool *draw_aux_buffer_disabled,
1439                                  gl_shader_stage stage,
1440                                  bool consider_framebuffer);
1441 void iris_predraw_resolve_framebuffer(struct iris_context *ice,
1442                                       struct iris_batch *batch,
1443                                       bool *draw_aux_buffer_disabled);
1444 void iris_predraw_flush_buffers(struct iris_context *ice,
1445                                 struct iris_batch *batch,
1446                                 gl_shader_stage stage);
1447 void iris_postdraw_update_resolve_tracking(struct iris_context *ice);
1448 void iris_postdraw_update_image_resolve_tracking(struct iris_context *ice,
1449                                                  gl_shader_stage stage);
1450 int iris_get_driver_query_info(struct pipe_screen *pscreen, unsigned index,
1451                                struct pipe_driver_query_info *info);
1452 int iris_get_driver_query_group_info(struct pipe_screen *pscreen,
1453                                      unsigned index,
1454                                      struct pipe_driver_query_group_info *info);
1455 
1456 /* iris_state.c */
1457 void gfx9_toggle_preemption(struct iris_context *ice,
1458                             struct iris_batch *batch,
1459                             const struct pipe_draw_info *draw);
1460 static inline bool
1461 iris_execute_indirect_draw_supported(const struct iris_context *ice,
1462                                      const struct pipe_draw_indirect_info *indirect,
1463                                      const struct pipe_draw_info *draw)
1464 {
1465    const struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1466    const struct iris_vs_data *vs_data =
1467       iris_vs_data(ice->shaders.prog[MESA_SHADER_VERTEX]);
1468    const bool is_multiview = draw->view_mask != 0;
1469    const size_t struct_size = draw->index_size ?
1470       sizeof(uint32_t) * 5 :
1471       sizeof(uint32_t) * 4;
1472    const bool aligned_stride =
1473       indirect && (indirect->stride == 0 || indirect->stride == struct_size);
1474 
1475    return (screen->devinfo->has_indirect_unroll &&
1476            aligned_stride &&
1477            (indirect &&
1478            !indirect->count_from_stream_output) &&
1479            !is_multiview &&
1480            !(vs_data->uses_firstvertex ||
1481              vs_data->uses_baseinstance ||
1482              vs_data->uses_drawid));
1483 }
1484 
1485 #ifdef genX
1486 #  include "iris_genx_protos.h"
1487 #else
1488 #  define genX(x) gfx8_##x
1489 #  include "iris_genx_protos.h"
1490 #  undef genX
1491 #  define genX(x) gfx9_##x
1492 #  include "iris_genx_protos.h"
1493 #  undef genX
1494 #  define genX(x) gfx11_##x
1495 #  include "iris_genx_protos.h"
1496 #  undef genX
1497 #  define genX(x) gfx12_##x
1498 #  include "iris_genx_protos.h"
1499 #  undef genX
1500 #  define genX(x) gfx125_##x
1501 #  include "iris_genx_protos.h"
1502 #  undef genX
1503 #  define genX(x) gfx20_##x
1504 #  include "iris_genx_protos.h"
1505 #  undef genX
1506 #endif
1507 
1508 #endif
1509