• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Rob Clark <robclark@freedesktop.org>
25  */
26 
27 #ifndef IR3_SHADER_H_
28 #define IR3_SHADER_H_
29 
30 #include <stdio.h>
31 
32 #include "c11/threads.h"
33 #include "compiler/shader_enums.h"
34 #include "compiler/nir/nir.h"
35 #include "util/bitscan.h"
36 #include "util/disk_cache.h"
37 
38 #include "ir3_compiler.h"
39 
40 struct glsl_type;
41 
/* driver param indices:
 *
 * Note the CS and VS spaces overlap (both start at 0) since a given
 * shader variant is only ever one stage.
 */
enum ir3_driver_param {
	/* compute shader driver params: */
	IR3_DP_NUM_WORK_GROUPS_X = 0,
	IR3_DP_NUM_WORK_GROUPS_Y = 1,
	IR3_DP_NUM_WORK_GROUPS_Z = 2,
	IR3_DP_LOCAL_GROUP_SIZE_X = 4,
	IR3_DP_LOCAL_GROUP_SIZE_Y = 5,
	IR3_DP_LOCAL_GROUP_SIZE_Z = 6,
	/* NOTE: gl_NumWorkGroups should be vec4 aligned because
	 * glDispatchComputeIndirect() needs to load these from
	 * the info->indirect buffer.  Keep that in mind when/if
	 * adding any additional CS driver params.
	 */
	IR3_DP_CS_COUNT   = 8,   /* must be aligned to vec4 */

	/* vertex shader driver params: */
	IR3_DP_DRAWID = 0,
	IR3_DP_VTXID_BASE = 1,
	IR3_DP_INSTID_BASE = 2,
	IR3_DP_VTXCNT_MAX = 3,
	/* user-clip-plane components, up to 8x vec4's: */
	IR3_DP_UCP0_X     = 4,
	/* .... */
	IR3_DP_UCP7_W     = 35,
	IR3_DP_VS_COUNT   = 36   /* must be aligned to vec4 */
};
69 
/* Compile-time limits used to size the tables below: */
#define IR3_MAX_SHADER_BUFFERS   32
#define IR3_MAX_SHADER_IMAGES    32
#define IR3_MAX_SO_BUFFERS        4
#define IR3_MAX_SO_STREAMS        4
#define IR3_MAX_SO_OUTPUTS       64
#define IR3_MAX_UBO_PUSH_RANGES  32
76 
/* mirrors SYSTEM_VALUE_BARYCENTRIC_ but starting from 0 */
enum ir3_bary {
	IJ_PERSP_PIXEL,
	IJ_PERSP_SAMPLE,
	IJ_PERSP_CENTROID,
	IJ_PERSP_SIZE,
	IJ_LINEAR_PIXEL,
	IJ_LINEAR_CENTROID,
	IJ_LINEAR_SAMPLE,
	IJ_COUNT,    /* number of interpolation modes, not a real mode */
};
88 
/**
 * Description of a lowered UBO (identifies which UBO a lowered range
 * came from, for both bound and bindless UBOs).
 */
struct ir3_ubo_info {
	uint32_t block; /* Which constant block */
	uint16_t bindless_base; /* For bindless, which base register is used */
	bool bindless;  /* true if addressed via a bindless base rather than 'block' binding */
};
97 
/**
 * Description of a range of a lowered UBO access.
 *
 * Note that a single UBO may be lowered into multiple disjoint ranges,
 * so drivers must not assume one range per UBO.
 */
struct ir3_ubo_range {
	struct ir3_ubo_info ubo;
	uint32_t offset; /* start offset to push in the const register file */
	uint32_t start, end; /* range of block that's actually used */
};
109 
/* Result of scanning a shader for UBO accesses that can be pushed. */
struct ir3_ubo_analysis_state {
	struct ir3_ubo_range range[IR3_MAX_UBO_PUSH_RANGES];
	uint32_t num_enabled;    /* number of valid entries in range[] */
	uint32_t size;           /* total push space used, in bytes */
	uint32_t cmdstream_size; /* for per-gen backend to stash required cmdstream size */
};
116 
/**
 * Describes the layout of shader consts.  This includes:
 *   + User consts + driver lowered UBO ranges
 *   + SSBO sizes
 *   + Image sizes/dimensions
 *   + Driver params (ie. IR3_DP_*)
 *   + TFBO addresses (for generations that do not have hardware streamout)
 *   + Lowered immediates
 *
 * For consts needed to pass internal values to shader which may or may not
 * be required, rather than allocating worst-case const space, we scan the
 * shader and allocate consts as-needed:
 *
 *   + SSBO sizes: only needed if shader has a get_ssbo_size intrinsic
 *     for a given SSBO
 *
 *   + Image dimensions: needed to calculate pixel offset, but only for
 *     images that have a image_store intrinsic
 *
 * Layout of constant registers, each section aligned to vec4.  Note
 * that pointer size (ubo, etc) changes depending on generation.
 *
 *    user consts
 *    UBO addresses
 *    SSBO sizes
 *    if (vertex shader) {
 *        driver params (IR3_DP_*)
 *        if (stream_output.num_outputs > 0)
 *           stream-out addresses
 *    } else if (compute_shader) {
 *        driver params (IR3_DP_*)
 *    }
 *    immediates
 *
 * Immediates go last mostly because they are inserted in the CP pass
 * after the nir -> ir3 frontend.
 *
 * Note UBO size in bytes should be aligned to vec4
 */
struct ir3_const_state {
	unsigned num_ubos;
	unsigned num_driver_params;   /* scalar */

	/* Start offsets (vec4) for the sections described in the layout
	 * comment above:
	 */
	struct {
		/* user const start at zero */
		unsigned ubo;
		/* NOTE that a3xx might need a section for SSBO addresses too */
		unsigned ssbo_sizes;
		unsigned image_dims;
		unsigned driver_param;
		unsigned tfbo;
		unsigned primitive_param;
		unsigned primitive_map;
		unsigned immediate;
	} offsets;

	struct {
		uint32_t mask;  /* bitmask of SSBOs that have get_ssbo_size */
		uint32_t count; /* number of consts allocated */
		/* one const allocated per SSBO which has get_ssbo_size,
		 * ssbo_sizes.off[ssbo_id] is offset from start of ssbo_sizes
		 * consts:
		 */
		uint32_t off[IR3_MAX_SHADER_BUFFERS];
	} ssbo_size;

	struct {
		uint32_t mask;  /* bitmask of images that have image_store */
		uint32_t count; /* number of consts allocated */
		/* three const allocated per image which has image_store:
		 *  + cpp         (bytes per pixel)
		 *  + pitch       (y pitch)
		 *  + array_pitch (z pitch)
		 */
		uint32_t off[IR3_MAX_SHADER_IMAGES];
	} image_dims;

	/* Immediates lowered to consts (inserted in the CP pass): */
	unsigned immediates_count;
	unsigned immediates_size;
	uint32_t *immediates;

	/* State of ubo access lowered to push consts: */
	struct ir3_ubo_analysis_state ubo_state;
};
201 
/**
 * A single output for vertex transform feedback.
 */
struct ir3_stream_output {
	unsigned register_index:6;  /**< 0 to 63 (OUT index) */
	unsigned start_component:2; /**< 0 to 3 */
	unsigned num_components:3;  /**< 1 to 4 */
	unsigned output_buffer:3;   /**< 0 to PIPE_MAX_SO_BUFFERS */
	unsigned dst_offset:16;     /**< offset into the buffer in dwords */
	unsigned stream:2;          /**< 0 to 3 */
};
213 
/**
 * Stream output for vertex transform feedback.
 */
struct ir3_stream_output_info {
	unsigned num_outputs;
	/** stride for an entire vertex for each buffer in dwords */
	uint16_t stride[IR3_MAX_SO_BUFFERS];

	/* These correspond to the VPC_SO_STREAM_CNTL fields */
	uint8_t streams_written;                       /* bitmask of streams with outputs */
	uint8_t buffer_to_stream[IR3_MAX_SO_BUFFERS];  /* stream index feeding each buffer */

	/**
	 * Array of stream outputs, in the order they are to be written in.
	 * Selected components are tightly packed into the output buffer.
	 */
	struct ir3_stream_output output[IR3_MAX_SO_OUTPUTS];
};
232 
233 
234 /**
235  * Starting from a4xx, HW supports pre-dispatching texture sampling
236  * instructions prior to scheduling a shader stage, when the
237  * coordinate maps exactly to an output of the previous stage.
238  */
239 
240 /**
241  * There is a limit in the number of pre-dispatches allowed for any
242  * given stage.
243  */
244 #define IR3_MAX_SAMPLER_PREFETCH 4
245 
246 /**
247  * This is the output stream value for 'cmd', as used by blob. It may
248  * encode the return type (in 3 bits) but it hasn't been verified yet.
249  */
250 #define IR3_SAMPLER_PREFETCH_CMD 0x4
251 #define IR3_SAMPLER_BINDLESS_PREFETCH_CMD 0x6
252 
/**
 * Stream output for texture sampling pre-dispatches.
 */
struct ir3_sampler_prefetch {
	uint8_t src;               /* source (coordinate) register */
	uint8_t samp_id;           /* sampler index (non-bindless) */
	uint8_t tex_id;            /* texture index (non-bindless) */
	uint16_t samp_bindless_id; /* sampler id for the bindless path */
	uint16_t tex_bindless_id;  /* texture id for the bindless path */
	uint8_t dst;               /* destination register */
	uint8_t wrmask;            /* component writemask for dst */
	uint8_t half_precision;    /* result written as half regs */
	uint8_t cmd;               /* IR3_SAMPLER_PREFETCH_CMD or *_BINDLESS_* */
};
267 
268 
/* Configuration key used to identify a shader variant.. different
 * shader variants can be used to implement features not supported
 * in hw (two sided color), binning-pass vertex shader, etc.
 *
 * When adding to this struct, please update ir3_shader_variant()'s debug
 * output.
 */
struct ir3_shader_key {
	union {
		struct {
			/*
			 * Combined Vertex/Fragment shader parameters:
			 */
			unsigned ucp_enables : 8;

			/* do we need to check {v,f}saturate_{s,t,r}? */
			unsigned has_per_samp : 1;

			/*
			 * Vertex shader variant parameters:
			 */
			unsigned vclamp_color : 1;

			/*
			 * Fragment shader variant parameters:
			 */
			unsigned sample_shading : 1;
			unsigned msaa           : 1;
			unsigned color_two_side : 1;
			/* used when shader needs to handle flat varyings (a4xx)
			 * for front/back color inputs to frag shader:
			 */
			unsigned rasterflat : 1;
			unsigned fclamp_color : 1;

			/* Indicates that this is a tessellation pipeline which requires a
			 * whole different kind of vertex shader.  In case of
			 * tessellation, this field also tells us which kind of output
			 * topology the TES uses, which the TCS needs to know.
			 */
#define IR3_TESS_NONE		0
#define IR3_TESS_TRIANGLES	1
#define IR3_TESS_QUADS		2
#define IR3_TESS_ISOLINES	3
			unsigned tessellation : 2;

			unsigned has_gs : 1;

			/* Whether this variant sticks to the "safe" maximum constlen,
			 * which guarantees that the combined stages will never go over
			 * the limit:
			 */
			unsigned safe_constlen : 1;

			/* Whether gl_Layer must be forced to 0 because it isn't written. */
			unsigned layer_zero : 1;

			/* Whether gl_ViewportIndex must be forced to 0 because it isn't written. */
			unsigned view_zero : 1;
		};
		/* Aliases the bitfields above so they can be compared/masked as
		 * one word, see ir3_shader_key_equal()/ir3_key_clear_unused():
		 */
		uint32_t global;
	};

	/* bitmask of sampler which needs coords clamped for vertex
	 * shader:
	 */
	uint16_t vsaturate_s, vsaturate_t, vsaturate_r;

	/* bitmask of sampler which needs coords clamped for frag
	 * shader:
	 */
	uint16_t fsaturate_s, fsaturate_t, fsaturate_r;

	/* bitmask of ms shifts */
	uint32_t vsamples, fsamples;

	/* bitmask of samplers which need astc srgb workaround: */
	uint16_t vastc_srgb, fastc_srgb;
};
348 
349 static inline unsigned
ir3_tess_mode(unsigned gl_tess_mode)350 ir3_tess_mode(unsigned gl_tess_mode)
351 {
352 	switch (gl_tess_mode) {
353 	case GL_ISOLINES:
354 		return  IR3_TESS_ISOLINES;
355 	case GL_TRIANGLES:
356 		return IR3_TESS_TRIANGLES;
357 	case GL_QUADS:
358 		return IR3_TESS_QUADS;
359 	default:
360 		unreachable("bad tessmode");
361 	}
362 }
363 
364 static inline bool
ir3_shader_key_equal(const struct ir3_shader_key * a,const struct ir3_shader_key * b)365 ir3_shader_key_equal(const struct ir3_shader_key *a, const struct ir3_shader_key *b)
366 {
367 	/* slow-path if we need to check {v,f}saturate_{s,t,r} */
368 	if (a->has_per_samp || b->has_per_samp)
369 		return memcmp(a, b, sizeof(struct ir3_shader_key)) == 0;
370 	return a->global == b->global;
371 }
372 
373 /* will the two keys produce different lowering for a fragment shader? */
374 static inline bool
ir3_shader_key_changes_fs(struct ir3_shader_key * key,struct ir3_shader_key * last_key)375 ir3_shader_key_changes_fs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
376 {
377 	if (last_key->has_per_samp || key->has_per_samp) {
378 		if ((last_key->fsaturate_s != key->fsaturate_s) ||
379 				(last_key->fsaturate_t != key->fsaturate_t) ||
380 				(last_key->fsaturate_r != key->fsaturate_r) ||
381 				(last_key->fsamples != key->fsamples) ||
382 				(last_key->fastc_srgb != key->fastc_srgb))
383 			return true;
384 	}
385 
386 	if (last_key->fclamp_color != key->fclamp_color)
387 		return true;
388 
389 	if (last_key->color_two_side != key->color_two_side)
390 		return true;
391 
392 	if (last_key->rasterflat != key->rasterflat)
393 		return true;
394 
395 	if (last_key->layer_zero != key->layer_zero)
396 		return true;
397 
398 	if (last_key->ucp_enables != key->ucp_enables)
399 		return true;
400 
401 	if (last_key->safe_constlen != key->safe_constlen)
402 		return true;
403 
404 	return false;
405 }
406 
407 /* will the two keys produce different lowering for a vertex shader? */
408 static inline bool
ir3_shader_key_changes_vs(struct ir3_shader_key * key,struct ir3_shader_key * last_key)409 ir3_shader_key_changes_vs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
410 {
411 	if (last_key->has_per_samp || key->has_per_samp) {
412 		if ((last_key->vsaturate_s != key->vsaturate_s) ||
413 				(last_key->vsaturate_t != key->vsaturate_t) ||
414 				(last_key->vsaturate_r != key->vsaturate_r) ||
415 				(last_key->vsamples != key->vsamples) ||
416 				(last_key->vastc_srgb != key->vastc_srgb))
417 			return true;
418 	}
419 
420 	if (last_key->vclamp_color != key->vclamp_color)
421 		return true;
422 
423 	if (last_key->ucp_enables != key->ucp_enables)
424 		return true;
425 
426 	if (last_key->safe_constlen != key->safe_constlen)
427 		return true;
428 
429 	return false;
430 }
431 
/**
 * On a4xx+a5xx, Images share state with textures and SSBOs:
 *
 *   + Uses texture (cat5) state/instruction (isam) to read
 *   + Uses SSBO state and instructions (cat6) to write and for atomics
 *
 * Starting with a6xx, Images and SSBOs are basically the same thing,
 * with texture state and isam also used for SSBO reads.
 *
 * On top of that, gallium makes the SSBO (shader_buffers) state semi
 * sparse, with the first half of the state space used for atomic
 * counters lowered to atomic buffers.  We could ignore this, but I
 * don't think we could *really* handle the case of a single shader
 * that used the max # of textures + images + SSBOs.  And once we are
 * offsetting images by num_ssbos (or vice versa) to map them into
 * the same hardware state, the hardware state has become coupled to
 * the shader state, so at this point we might as well just use a
 * mapping table to remap things from image/SSBO idx to hw idx.
 *
 * To make things less (more?) confusing, for the hw "SSBO" state
 * (since it is really both SSBO and Image) I'll use the name "IBO"
 */
struct ir3_ibo_mapping {
#define IBO_INVALID 0xff
	/* Maps logical SSBO state to hw tex state: */
	uint8_t ssbo_to_tex[IR3_MAX_SHADER_BUFFERS];

	/* Maps logical Image state to hw tex state: */
	uint8_t image_to_tex[IR3_MAX_SHADER_IMAGES];

	/* Maps hw state back to logical SSBO or Image state:
	 *
	 * note IBO_SSBO ORd into values to indicate that the
	 * hw slot is used for SSBO state vs Image state.
	 */
#define IBO_SSBO    0x80
	uint8_t tex_to_image[32];

	uint8_t num_tex;    /* including real textures */
	uint8_t tex_base;   /* the number of real textures, ie. image/ssbo start here */
};
473 
474 /* Represents half register in regid */
475 #define HALF_REG_ID    0x100
476 
/**
 * Shader variant which contains the actual hw shader instructions,
 * and necessary info for shader state setup.
 */
struct ir3_shader_variant {
	/* buffer object holding the assembled instructions */
	struct fd_bo *bo;

	/* variant id (for debug) */
	uint32_t id;

	/* the key this variant was compiled for: */
	struct ir3_shader_key key;

	/* vertex shaders can have an extra version for hwbinning pass,
	 * which is pointed to by so->binning:
	 */
	bool binning_pass;
//	union {
		struct ir3_shader_variant *binning;
		struct ir3_shader_variant *nonbinning;
//	};

	struct ir3 *ir;     /* freed after assembling machine instructions */

	/* shader variants form a linked list: */
	struct ir3_shader_variant *next;

	/* replicated here to avoid passing extra ptrs everywhere: */
	gl_shader_stage type;
	struct ir3_shader *shader;

	/*
	 * Below here is serialized when written to disk cache:
	 */

	/* The actual binary shader instructions, size given by info.sizedwords: */
	uint32_t *bin;

	struct ir3_const_state *const_state;

	/*
	 * The following macros are used by the shader disk cache save/
	 * restore paths to serialize/deserialize the variant.  Any
	 * pointers that require special handling in store_variant()
	 * and retrieve_variant() should go above here.
	 */
#define VARIANT_CACHE_START    offsetof(struct ir3_shader_variant, info)
#define VARIANT_CACHE_PTR(v)   (((char *)v) + VARIANT_CACHE_START)
#define VARIANT_CACHE_SIZE     (sizeof(struct ir3_shader_variant) - VARIANT_CACHE_START)

	struct ir3_info info;

	/* Levels of nesting of flow control:
	 */
	unsigned branchstack;

	unsigned max_sun;
	unsigned loops;

	/* the instructions length is in units of instruction groups
	 * (4 instructions for a3xx, 16 instructions for a4xx.. each
	 * instruction is 2 dwords):
	 */
	unsigned instrlen;

	/* the constants length is in units of vec4's, and is the sum of
	 * the uniforms and the built-in compiler constants
	 */
	unsigned constlen;

	/* About Linkage:
	 *   + Let the frag shader determine the position/compmask for the
	 *     varyings, since it is the place where we know if the varying
	 *     is actually used, and if so, which components are used.  So
	 *     what the hw calls "outloc" is taken from the "inloc" of the
	 *     frag shader.
	 *   + From the vert shader, we only need the output regid
	 */

	bool frag_face, color0_mrt;
	uint8_t fragcoord_compmask;

	/* NOTE: for input/outputs, slot is:
	 *   gl_vert_attrib  - for VS inputs
	 *   gl_varying_slot - for VS output / FS input
	 *   gl_frag_result  - for FS output
	 */

	/* varyings/outputs: */
	unsigned outputs_count;
	struct {
		uint8_t slot;
		uint8_t regid;
		uint8_t view;
		bool    half : 1;
	} outputs[32 + 2];  /* +POSITION +PSIZE */
	bool writes_pos, writes_smask, writes_psize, writes_stencilref;

	/* Size in dwords of all outputs for VS, size of entire patch for HS. */
	uint32_t output_size;

	/* Expected size of incoming output_loc for HS, DS, and GS */
	uint32_t input_size;

	/* Map from location to offset in per-primitive storage. In dwords for
	 * HS, where varyings are read in the next stage via ldg with a dword
	 * offset, and in bytes for all other stages.
	 */
	unsigned output_loc[32 + 4]; /* +POSITION +PSIZE +CLIP_DIST0 +CLIP_DIST1 */

	/* attributes (VS) / varyings (FS):
	 * Note that sysval's should come *after* normal inputs.
	 */
	unsigned inputs_count;
	struct {
		uint8_t slot;
		uint8_t regid;
		uint8_t compmask;
		/* location of input (ie. offset passed to bary.f, etc).  This
		 * matches the SP_VS_VPC_DST_REG.OUTLOCn value (a3xx and a4xx
		 * have the OUTLOCn value offset by 8, presumably to account
		 * for gl_Position/gl_PointSize)
		 */
		uint8_t inloc;
		/* vertex shader specific: */
		bool    sysval     : 1;   /* slot is a gl_system_value */
		/* fragment shader specific: */
		bool    bary       : 1;   /* fetched varying (vs one loaded into reg) */
		bool    rasterflat : 1;   /* special handling for emit->rasterflat */
		bool    half       : 1;
		bool    flat       : 1;
	} inputs[32 + 2];  /* +POSITION +FACE */

	/* sum of input components (scalar).  For frag shaders, it only counts
	 * the varying inputs:
	 */
	unsigned total_in;

	/* For frag shaders, the total number of inputs (not scalar,
	 * ie. SP_VS_PARAM_REG.TOTALVSOUTVAR)
	 */
	unsigned varying_in;

	/* Remapping table to map Image and SSBO to hw state: */
	struct ir3_ibo_mapping image_mapping;

	/* number of samplers/textures (which are currently 1:1): */
	int num_samp;

	/* is there an implicit sampler to read framebuffer (FS only).. if
	 * so the sampler-idx is 'num_samp - 1' (ie. it is appended after
	 * the last "real" texture)
	 */
	bool fb_read;

	/* do we have one or more SSBO instructions: */
	bool has_ssbo;

	/* Which bindless resources are used, for filling out sp_xs_config */
	bool bindless_tex;
	bool bindless_samp;
	bool bindless_ibo;
	bool bindless_ubo;

	/* do we need derivatives: */
	bool need_pixlod;

	bool need_fine_derivatives;

	/* do we have image write, etc (which prevents early-z): */
	bool no_earlyz;

	/* do we have kill, which also prevents early-z, but not necessarily
	 * early-lrz (as long as lrz-write is disabled, which must be handled
	 * outside of ir3.  Unlike other no_earlyz cases, kill doesn't have
	 * side effects that prevent early-lrz discard.
	 */
	bool has_kill;

	/* NOTE(review): presumably mirrors key.has_per_samp — confirm at call sites */
	bool per_samp;

	/* Are we using split or merged register file? */
	bool mergedregs;

	uint8_t clip_mask, cull_mask;

	/* for astc srgb workaround, the number/base of additional
	 * alpha tex states we need, and index of original tex states
	 */
	struct {
		unsigned base, count;
		unsigned orig_idx[16];
	} astc_srgb;

	/* texture sampler pre-dispatches */
	uint32_t num_sampler_prefetch;
	struct ir3_sampler_prefetch sampler_prefetch[IR3_MAX_SAMPLER_PREFETCH];
};
674 
675 static inline const char *
ir3_shader_stage(struct ir3_shader_variant * v)676 ir3_shader_stage(struct ir3_shader_variant *v)
677 {
678 	switch (v->type) {
679 	case MESA_SHADER_VERTEX:     return v->binning_pass ? "BVERT" : "VERT";
680 	case MESA_SHADER_TESS_CTRL:  return "TCS";
681 	case MESA_SHADER_TESS_EVAL:  return "TES";
682 	case MESA_SHADER_GEOMETRY:   return "GEOM";
683 	case MESA_SHADER_FRAGMENT:   return "FRAG";
684 	case MESA_SHADER_COMPUTE:    return "CL";
685 	default:
686 		unreachable("invalid type");
687 		return NULL;
688 	}
689 }
690 
691 /* Currently we do not do binning for tess.  And for GS there is no
692  * cross-stage VS+GS optimization, so the full VS+GS is used in
693  * the binning pass.
694  */
695 static inline bool
ir3_has_binning_vs(const struct ir3_shader_key * key)696 ir3_has_binning_vs(const struct ir3_shader_key *key)
697 {
698 	if (key->tessellation || key->has_gs)
699 		return false;
700 	return true;
701 }
702 
/**
 * Represents a shader at the API level, before state-specific variants are
 * generated.
 */
struct ir3_shader {
	gl_shader_stage type;

	/* shader id (for debug): */
	uint32_t id;
	/* number of variants generated so far for this shader */
	uint32_t variant_count;

	/* Set by freedreno after shader_state_create, so we can emit debug info
	 * when recompiling a shader at draw time.
	 */
	bool initial_variants_done;

	struct ir3_compiler *compiler;

	/* const space reserved for the driver ahead of user consts */
	unsigned num_reserved_user_consts;

	bool nir_finalized;
	struct nir_shader *nir;
	struct ir3_stream_output_info stream_output;

	/* linked list of variants (see ir3_shader_variant::next): */
	struct ir3_shader_variant *variants;
	mtx_t variants_lock;   /* guards the variants list */

	cache_key cache_key;     /* shader disk-cache key */

	/* Bitmask of bits of the shader key used by this shader.  Used to avoid
	 * recompiles for GL NOS that doesn't actually apply to the shader.
	 */
	struct ir3_shader_key key_mask;
};
737 
738 /**
739  * In order to use the same cmdstream, in particular constlen setup and const
740  * emit, for both binning and draw pass (a6xx+), the binning pass re-uses it's
741  * corresponding draw pass shaders const_state.
742  */
743 static inline struct ir3_const_state *
ir3_const_state(const struct ir3_shader_variant * v)744 ir3_const_state(const struct ir3_shader_variant *v)
745 {
746 	if (v->binning_pass)
747 		return v->nonbinning->const_state;
748 	return v->const_state;
749 }
750 
751 /* Given a variant, calculate the maximum constlen it can have.
752  */
753 
754 static inline unsigned
ir3_max_const(const struct ir3_shader_variant * v)755 ir3_max_const(const struct ir3_shader_variant *v)
756 {
757 	const struct ir3_compiler *compiler = v->shader->compiler;
758 
759 	if (v->shader->type == MESA_SHADER_COMPUTE) {
760 		return compiler->max_const_compute;
761 	} else if (v->key.safe_constlen) {
762 		return compiler->max_const_safe;
763 	} else if (v->shader->type == MESA_SHADER_FRAGMENT) {
764 		return compiler->max_const_frag;
765 	} else {
766 		return compiler->max_const_geom;
767 	}
768 }
769 
770 void * ir3_shader_assemble(struct ir3_shader_variant *v);
771 struct ir3_shader_variant * ir3_shader_get_variant(struct ir3_shader *shader,
772 		const struct ir3_shader_key *key, bool binning_pass, bool *created);
773 struct ir3_shader * ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
774 		unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output);
775 uint32_t ir3_trim_constlen(struct ir3_shader_variant **variants,
776 		const struct ir3_compiler *compiler);
777 void ir3_shader_destroy(struct ir3_shader *shader);
778 void ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out);
779 uint64_t ir3_shader_outputs(const struct ir3_shader *so);
780 
781 int
782 ir3_glsl_type_size(const struct glsl_type *type, bool bindless);
783 
784 /*
785  * Helper/util:
786  */
787 
788 /* clears shader-key flags which don't apply to the given shader.
789  */
790 static inline void
ir3_key_clear_unused(struct ir3_shader_key * key,struct ir3_shader * shader)791 ir3_key_clear_unused(struct ir3_shader_key *key, struct ir3_shader *shader)
792 {
793 	uint32_t *key_bits = (uint32_t *)key;
794 	uint32_t *key_mask = (uint32_t *)&shader->key_mask;
795 	STATIC_ASSERT(sizeof(*key) % 4 == 0);
796 	for (int i = 0; i < sizeof(*key) >> 2; i++)
797 		key_bits[i] &= key_mask[i];
798 }
799 
800 static inline int
ir3_find_output(const struct ir3_shader_variant * so,gl_varying_slot slot)801 ir3_find_output(const struct ir3_shader_variant *so, gl_varying_slot slot)
802 {
803 	int j;
804 
805 	for (j = 0; j < so->outputs_count; j++)
806 		if (so->outputs[j].slot == slot)
807 			return j;
808 
809 	/* it seems optional to have a OUT.BCOLOR[n] for each OUT.COLOR[n]
810 	 * in the vertex shader.. but the fragment shader doesn't know this
811 	 * so  it will always have both IN.COLOR[n] and IN.BCOLOR[n].  So
812 	 * at link time if there is no matching OUT.BCOLOR[n], we must map
813 	 * OUT.COLOR[n] to IN.BCOLOR[n].  And visa versa if there is only
814 	 * a OUT.BCOLOR[n] but no matching OUT.COLOR[n]
815 	 */
816 	if (slot == VARYING_SLOT_BFC0) {
817 		slot = VARYING_SLOT_COL0;
818 	} else if (slot == VARYING_SLOT_BFC1) {
819 		slot = VARYING_SLOT_COL1;
820 	} else if (slot == VARYING_SLOT_COL0) {
821 		slot = VARYING_SLOT_BFC0;
822 	} else if (slot == VARYING_SLOT_COL1) {
823 		slot = VARYING_SLOT_BFC1;
824 	} else {
825 		return -1;
826 	}
827 
828 	for (j = 0; j < so->outputs_count; j++)
829 		if (so->outputs[j].slot == slot)
830 			return j;
831 
832 	debug_assert(0);
833 
834 	return -1;
835 }
836 
837 static inline int
ir3_next_varying(const struct ir3_shader_variant * so,int i)838 ir3_next_varying(const struct ir3_shader_variant *so, int i)
839 {
840 	while (++i < so->inputs_count)
841 		if (so->inputs[i].compmask && so->inputs[i].bary)
842 			break;
843 	return i;
844 }
845 
/* VS output <-> FS input linkage map, built by ir3_link_shaders(). */
struct ir3_shader_linkage {
	/* Maximum location either consumed by the fragment shader or produced by
	 * the last geometry stage, i.e. the size required for each vertex in the
	 * VPC in DWORD's.
	 */
	uint8_t max_loc;

	/* Number of entries in var. */
	uint8_t cnt;

	/* Bitset of locations used, including ones which are only used by the FS.
	 */
	uint32_t varmask[4];

	/* Map from VS output to location. */
	struct {
		uint8_t regid;
		uint8_t compmask;
		uint8_t loc;
	} var[32];

	/* location for fixed-function gl_PrimitiveID passthrough (0xff if none) */
	uint8_t primid_loc;

	/* location for fixed-function gl_ViewIndex passthrough (0xff if none) */
	uint8_t viewid_loc;

	/* location for combined clip/cull distance arrays (0xff if none) */
	uint8_t clip0_loc, clip1_loc;
};
876 
877 static inline void
ir3_link_add(struct ir3_shader_linkage * l,uint8_t regid_,uint8_t compmask,uint8_t loc)878 ir3_link_add(struct ir3_shader_linkage *l, uint8_t regid_, uint8_t compmask, uint8_t loc)
879 {
880 	for (int j = 0; j < util_last_bit(compmask); j++) {
881 		uint8_t comploc = loc + j;
882 		l->varmask[comploc / 32] |= 1 << (comploc % 32);
883 	}
884 
885 	l->max_loc = MAX2(l->max_loc, loc + util_last_bit(compmask));
886 
887 	if (regid_ != regid(63, 0)) {
888 		int i = l->cnt++;
889 		debug_assert(i < ARRAY_SIZE(l->var));
890 
891 		l->var[i].regid    = regid_;
892 		l->var[i].compmask = compmask;
893 		l->var[i].loc      = loc;
894 	}
895 }
896 
897 static inline void
ir3_link_shaders(struct ir3_shader_linkage * l,const struct ir3_shader_variant * vs,const struct ir3_shader_variant * fs,bool pack_vs_out)898 ir3_link_shaders(struct ir3_shader_linkage *l,
899 		const struct ir3_shader_variant *vs,
900 		const struct ir3_shader_variant *fs,
901 		bool pack_vs_out)
902 {
903 	/* On older platforms, varmask isn't programmed at all, and it appears
904 	 * that the hardware generates a mask of used VPC locations using the VS
905 	 * output map, and hangs if a FS bary instruction references a location
906 	 * not in the list. This means that we need to have a dummy entry in the
907 	 * VS out map for things like gl_PointCoord which aren't written by the
908 	 * VS. Furthermore we can't use r63.x, so just pick a random register to
909 	 * use if there is no VS output.
910 	 */
911 	const unsigned default_regid = pack_vs_out ? regid(63, 0) : regid(0, 0);
912 	int j = -1, k;
913 
914 	l->primid_loc = 0xff;
915 	l->viewid_loc = 0xff;
916 	l->clip0_loc = 0xff;
917 	l->clip1_loc = 0xff;
918 
919 	while (l->cnt < ARRAY_SIZE(l->var)) {
920 		j = ir3_next_varying(fs, j);
921 
922 		if (j >= fs->inputs_count)
923 			break;
924 
925 		if (fs->inputs[j].inloc >= fs->total_in)
926 			continue;
927 
928 		k = ir3_find_output(vs, fs->inputs[j].slot);
929 
930 		if (k < 0 && fs->inputs[j].slot == VARYING_SLOT_PRIMITIVE_ID) {
931 			l->primid_loc = fs->inputs[j].inloc;
932 		}
933 
934 		if (fs->inputs[j].slot == VARYING_SLOT_VIEW_INDEX) {
935 			assert(k < 0);
936 			l->viewid_loc = fs->inputs[j].inloc;
937 		}
938 
939 		if (fs->inputs[j].slot == VARYING_SLOT_CLIP_DIST0)
940 			l->clip0_loc = fs->inputs[j].inloc;
941 
942 		if (fs->inputs[j].slot == VARYING_SLOT_CLIP_DIST1)
943 			l->clip1_loc = fs->inputs[j].inloc;
944 
945 		ir3_link_add(l, k >= 0 ? vs->outputs[k].regid : default_regid,
946 			fs->inputs[j].compmask, fs->inputs[j].inloc);
947 	}
948 }
949 
950 static inline uint32_t
ir3_find_output_regid(const struct ir3_shader_variant * so,unsigned slot)951 ir3_find_output_regid(const struct ir3_shader_variant *so, unsigned slot)
952 {
953 	int j;
954 	for (j = 0; j < so->outputs_count; j++)
955 		if (so->outputs[j].slot == slot) {
956 			uint32_t regid = so->outputs[j].regid;
957 			if (so->outputs[j].half)
958 				regid |= HALF_REG_ID;
959 			return regid;
960 		}
961 	return regid(63, 0);
962 }
963 
/* ir3-internal varying slots, allocated past VARYING_SLOT_MAX: */
#define VARYING_SLOT_GS_HEADER_IR3			(VARYING_SLOT_MAX + 0)
#define VARYING_SLOT_GS_VERTEX_FLAGS_IR3	(VARYING_SLOT_MAX + 1)
#define VARYING_SLOT_TCS_HEADER_IR3			(VARYING_SLOT_MAX + 2)
967 
968 
969 static inline uint32_t
ir3_find_sysval_regid(const struct ir3_shader_variant * so,unsigned slot)970 ir3_find_sysval_regid(const struct ir3_shader_variant *so, unsigned slot)
971 {
972 	int j;
973 	for (j = 0; j < so->inputs_count; j++)
974 		if (so->inputs[j].sysval && (so->inputs[j].slot == slot))
975 			return so->inputs[j].regid;
976 	return regid(63, 0);
977 }
978 
979 /* calculate register footprint in terms of half-regs (ie. one full
980  * reg counts as two half-regs).
981  */
982 static inline uint32_t
ir3_shader_halfregs(const struct ir3_shader_variant * v)983 ir3_shader_halfregs(const struct ir3_shader_variant *v)
984 {
985 	return (2 * (v->info.max_reg + 1)) + (v->info.max_half_reg + 1);
986 }
987 
988 static inline uint32_t
ir3_shader_nibo(const struct ir3_shader_variant * v)989 ir3_shader_nibo(const struct ir3_shader_variant *v)
990 {
991 	/* The dummy variant used in binning mode won't have an actual shader. */
992 	if (!v->shader)
993 		return 0;
994 
995 	return v->shader->nir->info.num_ssbos + v->shader->nir->info.num_images;
996 }
997 
998 #endif /* IR3_SHADER_H_ */
999