1 /*
2 * Copyright © 2023 Advanced Micro Devices, Inc.
3 *
4 * SPDX-License-Identifier: MIT
5 */
6
7 /* Introduction
8 * ============
9 *
10 * This pass optimizes varyings between 2 shaders, which means dead input/
11 * output removal, constant and uniform load propagation, deduplication,
12 * compaction, and inter-shader code motion. This is used during the shader
13 * linking process.
14 *
15 *
16 * Notes on behavior
17 * =================
18 *
19 * The pass operates on scalar varyings using 32-bit and 16-bit types. Vector
20 * varyings are not allowed.
21 *
22 * Indirectly-indexed varying slots (not vertices) are not optimized or
23 * compacted, but unused slots of indirectly-indexed varyings are still filled
24 * with directly-indexed varyings during compaction. Indirectly-indexed
25 * varyings are still removed if they are unused by the other shader.
26 *
27 * Indirectly-indexed vertices don't disallow optimizations, but compromises
28 * are made depending on how they are accessed. They are common in TCS, TES,
29 * and GS, so there is a desire to optimize them as much as possible. More on
30 * that in various sections below.
31 *
32 * Transform feedback doesn't prevent most optimizations such as constant
33 * propagation and compaction. Shaders can be left with output stores that set
34 * the no_varying flag, meaning the output is not consumed by the next shader,
35 * which means that optimizations did their job and now the output is only
36 * consumed by transform feedback.
37 *
38 * All legacy varying slots are optimized when it's allowed.
39 *
40 *
41 * Convergence property of shader outputs
42 * ======================================
43 *
44 * When an output stores an SSA that is convergent and all stores of that
45 * output appear in unconditional blocks or conditional blocks with
46 * a convergent entry condition and the shader is not GS, it implies that all
47 * vertices of that output have the same value, therefore the output can be
48 * promoted to flat because all interpolation modes lead to the same result
49 * as flat. Such outputs are opportunistically compacted with both flat and
50 * non-flat varyings based on whichever has unused slots in their vec4s. This
51 * pass refers to such inputs, outputs, and varyings as "convergent" (meaning
52 * all vertices are always equal).
53 *
54 * By default, flat varyings are the only ones that are not considered convergent
55 * because we want the flexibility to pack convergent varyings with both flat
56 * and non-flat varyings, and since flat varyings can contain integers and
57 * doubles, we can never interpolate them as FP32 or FP16. Optimizations start
58 * with separate interpolated, flat, and convergent groups of varyings, and
59 * they choose whether they want to promote convergent to interpolated or
60 * flat, or whether to leave that decision to the end when the compaction
61 * happens.
62 *
63 * The above default behavior doesn't apply when the hw supports convergent
64 * flat loads with interpolated vec4 slots. (there is a NIR option)
65 *
66 * TES patch inputs are always convergent because they are uniform within
67 * a primitive.
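 *
 * For example (a sketch in GLSL-like pseudocode, not taken from the pass):
 * ```
 * // VS: both branches store a value that is uniform across the draw, and
 * // the branch condition is convergent, so "out_var" is convergent.
 * if (uniform_cond)
 *    out_var = uniform_a;
 * else
 *    out_var = uniform_b;
 * ```
 * Even if the FS declares the corresponding input as smoothly interpolated,
 * all vertices of a primitive provide the same value, so loading it as flat
 * gives the same result (up to the Inf/NaN caveat discussed near the end).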
68 *
69 *
70 * Optimization steps
71 * ==================
72 *
73 * 1. Determine which varying slots can be optimized and how.
74 *
75 * * When a varying is said to be "optimized" in the following text, it
76 * means all optimizations are performed, such as removal, constant
77 * propagation, and deduplication.
78 * * All VARn, PATCHn, and FOGC varyings are always optimized and
79 * compacted.
80 * * PRIMITIVE_ID is treated as VARn in (GS, FS).
81 * * TEXn are removed if they are dead (except TEXn inputs, which can't be
82 * removed due to being affected by the coord replace state). TEXn also
83 * can't be optimized or compacted for the same reason. TEXn not
84 * consumed by FS are treated as VARn.
85 * * COLn and BFCn only propagate constants if they are between 0 and 1
86 * because of the clamp vertex color state, and they are only
87 * deduplicated and compacted among themselves because they are affected
88 * by the flat shade, provoking vertex, two-side color selection, and
89 * clamp vertex color states. COLn and BFCn not consumed by FS are
90 * treated as VARn.
91 * * All system value outputs like POS, PSIZ, CLIP_DISTn, etc. can't be
92 * removed, but they are demoted to sysval-only outputs by setting
93 * the "no_varying" flag (i.e. they can be removed as varyings), so
94 * drivers should look at the "no_varying" flag. If an output is not
95 * a sysval output in a specific stage, it's treated as VARn. (such as
96 * POS in TCS)
97 * * TESS_LEVEL_* inputs in TES can't be touched if TCS is missing.
98 *
99 * 2. Remove unused inputs and outputs
100 *
101 * * Outputs not used in the next shader are removed.
102 * * Inputs not initialized by the previous shader are replaced with undef
103 * except:
104 * * LAYER and VIEWPORT are replaced with 0 in FS.
105 * * TEXn.xy is untouched because the coord replace state can set it, and
106 * TEXn.zw is replaced by (0, 1), which is equal to the coord replace
107 * value.
108 * * Output loads that have no output stores anywhere in the shader are
109 * replaced with undef. (for TCS, though it works with any shader)
110 * * Output stores with transform feedback are preserved, but get
111 * the "no_varying" flag, meaning they are not consumed by the next
112 * shader stage. Later, transform-feedback-only varyings are compacted
113 * (relocated) such that they are always last.
114 * * TCS outputs that are read by TCS, but not used by TES get
115 * the "no_varying" flag to indicate that they are only read by TCS and
116 * not consumed by TES. Later, such TCS outputs are compacted (relocated)
117 * such that they are always last to keep all outputs consumed by TES
118 * consecutive without holes.
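 *
 * A small sketch of this step (hypothetical outputs a/b):
 * ```
 * // Before:  VS: out_a = x; out_b = y;      FS: reads only in_a
 * // After:   VS: out_a = x;                 FS: reads in_a
 * ```
 * out_b is removed entirely. Conversely, if FS read an input that no VS
 * store initializes, that load would be replaced with undef (or 0 for
 * LAYER and VIEWPORT, or (0, 1) for TEXn.zw as described above).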
119 *
120 * 3. Constant, uniform, UBO load, and uniform expression propagation
121 *
122 * * Define "uniform expressions" as ALU expressions only sourcing
123 * constants, uniforms, and UBO loads.
124 * * Constants, uniforms, UBO loads, and uniform expressions stored
125 * in outputs are moved into the next shader, and the outputs are removed.
126 * * The same propagation is done from output stores to output loads.
127 * (for TCS, though it works with any shader)
128 * * If there are multiple stores to the same output, all such stores
129 * should store the same constant, uniform, UBO load, or uniform
130 * expression for the expression to be propagated. If an output has
131 * multiple vertices, all vertices should store the same expression.
132 * * nir->options has driver-settable callbacks for estimating the cost of
133 * uniform expressions, which lets drivers control the complexity of
134 * the uniform expressions that are propagated. This is to ensure that
135 * we don't measurably increase GPU overhead by moving code across
136 * pipeline stages that amplify GPU work.
137 * * Special cases:
138 * * Constant COLn and BFCn are propagated only if the constants are
139 * in the [0, 1] range because of the clamp vertex color state.
140 * If both COLn and BFCn are written, they must write the same
141 * constant. If BFCn is written but not COLn, the constant is
142 * propagated from BFCn to COLn.
143 * * TEX.xy is untouched because of the coord replace state.
144 * If TEX.zw is (0, 1), only those constants are propagated because
145 * they match the coord replace values.
146 * * CLIP_DISTn, LAYER, and VIEWPORT are always propagated.
147 * * Eliminated output stores get the "no_varying" flag if they are also
148 * xfb stores or write sysval outputs.
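 *
 * A small sketch of constant propagation (hypothetical varying "v"):
 * ```
 * // Before:  VS: out_v = 1.0;               FS: color = in_v * tex;
 * // After:   VS: (store of out_v removed)   FS: color = 1.0 * tex;
 * ```
 * Uniforms, UBO loads, and uniform expressions are propagated the same
 * way: the load or expression is re-emitted in the consumer instead of
 * reading the input.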
149 *
150 * 4. Remove duplicated output components
151 *
152 * * By comparing SSA defs.
153 * * If there are multiple stores to the same output, all such stores
154 * should store the same SSA as all stores of another output for
155 * the output to be considered duplicated. If an output has multiple
156 * vertices, all vertices should store the same SSA.
157 * * Deduplication can only be done between outputs of the same category.
158 * Those are: interpolated, patch, flat, interpolated color, flat color,
159 * and conditionally interpolated color based on the flat
160 * shade state.
161 * * Everything is deduplicated except TEXn due to the coord replace state.
162 * * Eliminated output stores get the "no_varying" flag if they are also
163 * xfb stores or write sysval outputs.
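 *
 * A small sketch of deduplication (hypothetical outputs a/b storing the
 * same SSA value):
 * ```
 * // Before:  VS: out_a = v; out_b = v;      FS: reads in_a and in_b
 * // After:   VS: out_a = v;                 FS: reads in_a in both places
 * ```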
164 *
165 * 5. Backward inter-shader code motion
166 *
167 * "Backward" refers to moving code in the opposite direction that shaders
168 * are executed, i.e. moving code from the consumer to the producer.
169 *
170 * Fragment shader example:
171 * ```
172 * result = input0 * uniform + input1 * constant + UBO.variable;
173 * ```
174 *
175 * The computation of "result" in the above example can be moved into
176 * the previous shader and both inputs can be replaced with a new input
177 * holding the value of "result", thus making the shader smaller and
178 * possibly reducing the number of inputs, uniforms, and UBOs by 1.
179 *
180 * Such code motion can be performed for any expression sourcing only
181 * inputs, constants, and uniforms except for fragment shaders, which can
182 * also do it but with the following limitations:
183 * * Only these transformations can be performed with interpolated inputs
184 * and any composition of these transformations (such as lerp), which can
185 * all be proven mathematically:
186 * * interp(x, i, j) + interp(y, i, j) = interp(x + y, i, j)
187 * * interp(x, i, j) + convergent_expr = interp(x + convergent_expr, i, j)
188 * * interp(x, i, j) * convergent_expr = interp(x * convergent_expr, i, j)
189 * * all of these transformations are considered "inexact" in NIR
190 * * interp interpolates an input according to the barycentric
191 * coordinates (i, j), which are different for perspective,
192 * noperspective, center, centroid, sample, at_offset, and at_sample
193 * modes.
194 * * convergent_expr is any expression sourcing only constants,
195 * uniforms, and convergent inputs. The only requirement on
196 * convergent_expr is that it doesn't vary between vertices of
197 * the same primitive, but it can vary between primitives.
198 * * If inputs are flat or convergent, there are no limitations on
199 * expressions that can be moved.
200 * * Interpolated and flat inputs can't mix in the same expression, but
201 * convergent inputs can mix with both.
202 * * The interpolation qualifier of the new input is inherited from
203 * the removed non-convergent inputs that should all have the same (i, j).
204 * If there are no non-convergent inputs, then the new input is declared
205 * as flat (for simplicity; we can't choose the barycentric coordinates
206 * at random because AMD doesn't like when there are multiple sets of
207 * barycentric coordinates in the same shader unnecessarily).
208 * * Inf values break code motion across interpolation. See the section
209 * discussing how we handle it near the end.
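 *
 * As a worked example of the rules above, a lerp of 2 interpolated inputs
 * with a convergent factor t can be moved as a whole:
 * ```
 * // FS before: r = interp(x, i, j) * (1 - t) + interp(y, i, j) * t
 * //   mul rule:  = interp(x * (1 - t), i, j) + interp(y * t, i, j)
 * //   add rule:  = interp(x * (1 - t) + y * t, i, j)
 * // FS after:  r = interp(new_input, i, j)
 * // VS after:  new_output = x * (1 - t) + y * t
 * //            (t is recomputed in VS from the values feeding its sources)
 * ```
 * This replaces 2 interpolated inputs with 1 and moves the ALU ops into
 * the producer.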
210 *
211 * The above rules also apply to open-coded TES input interpolation, which
212 * is handled the same as FS input interpolation. The only differences are:
213 * * Open-coded TES input interpolation must match one of the allowed
214 * equations. Different interpolation equations are treated the same as
215 * different interpolation qualifiers in FS.
216 * * Patch varyings are always treated as convergent.
217 *
218 * Prerequisites:
219 * * We need a post-dominator tree that is constructed from a graph where
220 * vertices are instructions and directed edges going into them are
221 * the values of their source operands. This is different from how NIR
222 * dominance works, which represents all instructions within a basic
223 * block as a linear chain of vertices in the graph.
224 * In our graph, all loads without source operands and all constants are
225 * entry nodes in the graph, and all stores and discards are exit nodes
226 * in the graph. Each shader can have multiple disjoint graphs where
227 * the Lowest Common Ancestor of 2 instructions doesn't exist.
228 * * Given the above definition, the instruction whose result is the best
229 * candidate for a new input is the farthest instruction that
230 * post-dominates one or more inputs and is movable between shaders.
231 *
232 * Algorithm Idea Part 1: Search
233 * * Pick any input load that is hypothetically movable and call it
234 * the iterator.
235 * * Get the immediate post-dominator of the iterator, and if it's movable,
236 * replace the iterator with it.
237 * * Repeat the previous step until the obtained immediate post-dominator
238 * is not movable.
239 * * The iterator now contains the farthest post-dominator that is movable.
240 * * Gather all input loads that the post-dominator consumes.
241 * * For each of those input loads, all matching output stores must be
242 * in the same block (because they will be replaced by a single store).
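 *
 * Applied to the earlier FS example, the search looks like this:
 * ```
 * result = input0 * uniform + input1 * constant + UBO.variable;
 *
 * iterator: load(input0) -> fmul -> fadd -> fadd(result)  // all movable
 * next immediate post-dominator: store(result)            // not movable
 * ```
 * so the final fadd computing "result" is the chosen post-dominator, and
 * the input loads it consumes are those of input0 and input1.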
243 *
244 * Algorithm Idea Part 2: Code Motion
245 * * Clone the post-dominator in the producer except input loads, which
246 * should be replaced by stored output values. Uniform and UBO loads,
247 * if any, should be cloned too.
248 * * Remove the original output stores.
249 * * Replace the post-dominator from the consumer with a new input load.
250 * * The step above makes the post-dominated input load that we picked
251 * at the beginning dead, but other input loads used by the post-
252 * dominator might still have other uses (shown in the example below).
253 *
254 * Example SSA-use graph - initial shader and the result:
255 * ```
256 * input0 input1 input0 input1
257 * \ / \ | \
258 * constant alu ... ======> | ...
259 * \ /
260 * alu
261 * (post-dominator)
262 * ```
263 *
264 * Description:
265 * On the right, the algorithm moved the constant and both ALU opcodes
266 * into the previous shader and input0 now contains the value of
267 * the post-dominator. input1 stays the same because it still has one
268 * use left. If input1 hadn't had the other use, it would have been
269 * removed.
270 *
271 * If the algorithm moves any code, the algorithm is repeated until there
272 * is no code that it can move.
273 *
274 * Which shader pairs are supported:
275 * * (VS, FS), (TES, FS): yes, fully
276 * * Limitation: If Infs must be preserved, no code is moved across
277 * interpolation, so only flat varyings are optimized.
278 * * (VS, TCS), (VS, GS), (TES, GS): no, but possible -- TODO
279 * * Current behavior:
280 * * Per-vertex inputs are rejected.
281 * * Possible solution:
282 * * All input loads used by an accepted post-dominator must use
283 * the same vertex index. The post-dominator must use all loads with
284 * that vertex index.
285 * * If a post-dominator is found for an input load from a specific
286 * slot, all other input loads from that slot must also have
287 * an accepted post-dominator, and all such post-dominators should
288 * be identical expressions.
289 * * (TCS, TES), (VS, TES): yes, with limitations
290 * * Limitations:
291 * * Only 1 store and 1 load per slot allowed.
292 * * No output loads allowed.
293 * * All stores used by an accepted post-dominator must be in
294 * the same block.
295 * * TCS barriers don't matter because there are no output loads.
296 * * Patch varyings are handled trivially with the above constraints.
297 * * Per-vertex outputs should only be indexed by gl_InvocationID.
298 * * An interpolated TES load is any ALU instruction that computes
299 * the result of linear interpolation of per-vertex inputs from
300 * the same slot using gl_TessCoord. If such an ALU instruction is
301 * found, it must be the only one, and all per-vertex input loads
302 * from that slot must feed into it. The interpolation equation must
303 * be equal to one of the allowed equations. Then the same rules as
304 * for interpolated FS inputs are used, treating different
305 * interpolation equations just like different interpolation
306 * qualifiers.
307 * * Patch inputs are treated as convergent, which means they are
308 * allowed to be in the same movable expression as interpolated TES
309 * inputs, and the same rules as for convergent FS inputs apply.
310 * * (GS, FS), (MS, FS): no
311 * * Workaround: Add a passthrough VS between GS/MS and FS, run
312 * the pass on the (VS, FS) pair to move code out of FS,
313 * and inline that VS at the end of your hw-specific
314 * GS/MS if it's possible.
315 * * (TS, MS): no
316 *
317 * The disadvantage of using the post-dominator tree is that it's a tree,
318 * which means there is only 1 post-dominator of each input. This example
319 * shows a case that could be optimized by replacing 3 inputs with 2 inputs,
320 * reducing the number of inputs by 1, but the immediate post-dominator of
321 * all input loads is NULL:
322 * ```
323 * temp0 = input0 + input1 + input2;
324 * temp1 = input0 + input1 * const1 + input2 * const2;
325 * ```
326 *
327 * If there is a graph algorithm that returns the best solution to
328 * the above case (which is temp0 and temp1 to replace all 3 inputs), let
329 * us know.
330 *
331 * 6. Forward inter-shader code motion
332 *
333 * TODO: Not implemented. The text below is a draft of the description.
334 *
335 * "Forward" refers to moving code in the direction that shaders are
336 * executed, i.e. moving code from the producer to the consumer.
337 *
338 * Vertex shader example:
339 * ```
340 * output0 = value + 1;
341 * output1 = value * 2;
342 * ```
343 *
344 * Both outputs can be replaced by 1 output storing "value", and both ALU
345 * operations can be moved into the next shader.
346 *
347 * The same dominance algorithm as in the previous optimization is used,
348 * except that:
349 * * Instead of inputs, we use outputs.
350 * * Instead of a post-dominator tree, we use a dominator tree of the exact
351 * same graph.
352 *
353 * The algorithm idea is: For each pair of output stores, find their
354 * Lowest Common Ancestor in the dominator tree, and that's a candidate
355 * for a new output. All movable loads like load_const should be removed
356 * from the graph, otherwise the LCA wouldn't exist.
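 *
 * For the vertex shader example above, after removing the movable
 * load_const nodes, the dominator tree of the SSA-use graph is:
 * ```
 *             value
 *            /     \
 *      fadd(+1)   fmul(*2)
 *          |         |
 *   store(output0) store(output1)
 * ```
 * The LCA of the 2 stores is "value", so "value" becomes the new output
 * and both ALU instructions are moved into the next shader.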
357 *
358 * The limitations on instructions that can be moved between shaders across
359 * interpolated loads are exactly the same as the previous optimization.
360 *
361 * nir->options has driver-settable callbacks for estimating the cost of
362 * expressions, which lets drivers control the complexity of expressions
363 * that can be moved to later shaders. This is to ensure that we don't
364 * measurably increase GPU overhead by moving code across pipeline
365 * stages that amplify GPU work.
366 *
367 * 7. Compaction to vec4 slots (AKA packing)
368 *
369 * First, varyings are divided into these groups, and components from each
370 * group are assigned locations in this order (effectively forcing
371 * components from the same group to be in the same vec4 slot or adjacent
372 * vec4 slots) with some exceptions listed below:
373 *
374 * Non-FS groups (patch and non-patch are packed separately):
375 * * 32-bit cross-invocation (TCS inputs using cross-invocation access)
376 * * 16-bit cross-invocation (TCS inputs using cross-invocation access)
377 * * 32-bit flat
378 * * 16-bit flat
379 * * 32-bit no-varying (TCS outputs read by TCS but not TES)
380 * * 16-bit no-varying (TCS outputs read by TCS but not TES)
381 *
382 * FS groups:
383 * * 32-bit interpolated (always FP32)
384 * * 32-bit flat
385 * * 32-bit convergent (always FP32)
386 * * 16-bit interpolated (always FP16)
387 * * 16-bit flat
388 * * 16-bit convergent (always FP16)
389 * * 32-bit transform feedback only
390 * * 16-bit transform feedback only
391 *
392 * When the driver/hw can't mix different interpolation qualifiers
393 * in the same vec4, the interpolated groups are further split into 6
394 * groups, one for each qualifier.
395 *
396 * Then, all scalar varyings are relocated into new slots, starting from
397 * VAR0.x and increasing the scalar slot offset in 32-bit or 16-bit
398 * increments. Rules:
399 * * Both 32-bit and 16-bit flat varyings are packed in the same vec4.
400 * * Convergent varyings can be packed with interpolated varyings of
401 * the same type or flat. The group to pack with is chosen based on
402 * whichever has unused scalar slots because we want to reduce the total
403 * number of vec4s. After filling all unused scalar slots, the remaining
404 * convergent varyings are packed as flat.
405 * * Transform-feedback-only slots and no-varying slots are packed last,
406 * so that they are consecutive and not intermixed with varyings consumed
407 * by the next shader stage, and 32-bit and 16-bit slots are packed in
408 * the same vec4. This allows reducing memory for outputs by ignoring
409 * the trailing outputs that the next shader stage doesn't read.
410 *
411 * In the end, we should end up with these groups for FS:
412 * * 32-bit interpolated (always FP32) on separate vec4s
413 * * 16-bit interpolated (always FP16) on separate vec4s
414 * * 32-bit flat and 16-bit flat, mixed in the same vec4
415 * * 32-bit and 16-bit transform feedback only, sharing vec4s with flat
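 *
 * For example, compacting 5 interpolated FP32 scalars, 1 convergent FP32
 * scalar, and 3 flat 32-bit scalars could produce this layout (a sketch):
 * ```
 * VAR0.xyzw = 4x interpolated FP32
 * VAR1.x    = 1x interpolated FP32
 * VAR1.y    = convergent FP32 (fills the unused interpolated slot)
 * VAR2.xyz  = 3x flat (16-bit flat varyings would share these vec4s)
 * ```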
416 *
417 * Colors are compacted the same but separately because they can't be mixed
418 * with VARn. Colors are divided into 3 FS groups. They are:
419 * * 32-bit maybe-interpolated (affected by the flat-shade state)
420 * * 32-bit interpolated (not affected by the flat-shade state)
421 * * 32-bit flat (not affected by the flat-shade state)
422 *
423 * To facilitate driver-specific output merging, color channels are
424 * assigned in a rotated order depending on which one the first unused VARn
425 * channel is. For example, if the first unused VARn channel is VAR0.z,
426 * color channels are allocated in this order:
427 * COL0.z, COL0.w, COL0.x, COL0.y, COL1.z, COL1.w, COL1.x, COL1.y
428 * The reason is that some drivers merge outputs if each output sets
429 * different components, for example 2 outputs defining VAR0.xy and COL0.z.
430 * If drivers do interpolation in the fragment shader and color
431 * interpolation can differ for each component, VAR0.xy and COL0.z can be
432 * stored in the same output storage slot, and the consumer can load VAR0
433 * and COL0 from the same slot.
434 *
435 * If COLn, BFCn, and TEXn are transform-feedback-only, they are moved to
436 * VARn. PRIMITIVE_ID in (GS, FS) and FOGC in (xx, FS) are always moved to
437 * VARn for better packing.
438 *
439 *
440 * Issue: Interpolation converts Infs to NaNs
441 * ==========================================
442 *
443 * Interpolation converts Infs to NaNs, i.e. interp(Inf, i, j) = NaN, which
444 * impacts and limits backward inter-shader code motion, uniform expression
445 * propagation, and compaction.
446 *
447 * When we decide not to interpolate a varying, we need to convert Infs to
448 * NaNs manually. Infs can be converted to NaNs like this: x*0 + x
449 * (suggested by Ian Romanick, the multiplication must be "exact")
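 *
 * This is what the build_convert_inf_to_nan() helper later in this file
 * emits with the NIR builder:
 * ```
 * nir_def *fma = nir_ffma_imm1(b, x, 0, x);            // x*0 + x
 * nir_instr_as_alu(fma->parent_instr)->exact = true;   // keep the x*0 term
 * ```
 * Inf*0 is NaN and NaN + x is NaN, while finite values pass through
 * unchanged because x*0 + x = x.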
450 *
451 * Changes to optimizations:
452 * - When we propagate a uniform expression and NaNs must be preserved,
453 * convert Infs in the result to NaNs using "x*0 + x" in the consumer.
454 * - When we change interpolation to flat for convergent varyings and NaNs
455 * must be preserved, apply "x*0 + x" to the stored output value
456 * in the producer.
457 * - There is no solution for backward inter-shader code motion with
458 * interpolation if Infs must be preserved. As an alternative, we can allow
459 * code motion across interpolation only for specific shader hashes in
460 * can_move_alu_across_interp. We can use shader-db to automatically produce
461 * a list of shader hashes that benefit from this optimization.
462 *
463 *
464 * Usage
465 * =====
466 *
467 * Requirements:
468 * - ALUs should be scalarized
469 * - Dot products and other vector opcodes should be lowered (recommended)
470 * - Input loads and output stores should be scalarized
471 * - 64-bit varyings should be lowered to 32 bits
472 * - nir_vertex_divergence_analysis must be called on the producer if
473 * the consumer is a fragment shader
474 *
475 * It's recommended to first run this for all shader pairs from the first
476 * shader to the last shader (to propagate constants etc.). If the optimization
477 * of (S1, S2) stages leads to changes in S1, remember the highest S1. Then
478 * re-run this for all shader pairs in the descending order from S1 to VS.
479 *
480 * NIR optimizations should be performed after every run that changes the IR.
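 *
 * A rough sketch of that driver loop (the entry point name and return
 * value below are placeholders, not the actual API):
 * ```
 * // forward: propagate constants etc. from the first stage to the last
 * for (s = first_stage; s < last_stage; s++)
 *    changed[s] |= run_opt_varyings(shader[s], shader[s + 1]);
 *
 * // backward: re-run from the highest producer that changed down to VS
 * for (s = highest_changed_producer; s > first_stage; s--)
 *    run_opt_varyings(shader[s - 1], shader[s]);
 *
 * // run NIR optimizations on every shader whose IR changed
 * ```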
481 *
482 *
483 * Analyzing the optimization potential of linking separate shaders
484 * ================================================================
485 *
486 * We can use this pass in an analysis pass that decides whether a separate
487 * shader has the potential to benefit from full draw-time linking. The way
488 * it would work is that we would create a passthrough shader adjacent to
489 * the separate shader, run this pass on both shaders, and check if the number
490 * of varyings decreased. This way we can decide to perform the draw-time
491 * linking only if we are confident that it would help performance.
492 *
493 * TODO: not implemented, mention the pass that implements it
494 */
495
496 #include "nir.h"
497 #include "nir_builder.h"
498 #include "util/hash_table.h"
499 #include "util/u_math.h"
500 #include "util/u_memory.h"
501
502 /* nir_opt_varyings works at scalar 16-bit granularity across all varyings.
503 *
504 * Slots (i % 8 == 0,2,4,6) are 32-bit channels or low bits of 16-bit channels.
505 * Slots (i % 8 == 1,3,5,7) are high bits of 16-bit channels. 32-bit channels
506 * don't set these slots as used in bitmasks.
507 */
508 #define NUM_SCALAR_SLOTS (NUM_TOTAL_VARYING_SLOTS * 8)
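
/* Example of the scalar slot numbering (see get_scalar_16bit_slot() below):
 *   VAR0.x as 32-bit      -> VARYING_SLOT_VAR0 * 8 + 0*2 + 0
 *   VAR0.y low 16 bits    -> VARYING_SLOT_VAR0 * 8 + 1*2 + 0
 *   VAR0.y high 16 bits   -> VARYING_SLOT_VAR0 * 8 + 1*2 + 1
 *   VAR0.w as 32-bit      -> VARYING_SLOT_VAR0 * 8 + 3*2 + 0
 */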
509
510 /* Fragment shader input slots can be packed with indirectly-indexed vec4
511 * slots if there are unused components, but only if the vec4 slot has
512 * the same interpolation type. There are only 3 types: FLAT, FP32, FP16.
513 */
514 enum fs_vec4_type {
515 FS_VEC4_TYPE_NONE = 0,
516 FS_VEC4_TYPE_FLAT,
517 FS_VEC4_TYPE_INTERP_EXPLICIT,
518 FS_VEC4_TYPE_INTERP_EXPLICIT_STRICT,
519 FS_VEC4_TYPE_PER_PRIMITIVE,
520 /* When nir_io_has_flexible_input_interpolation_except_flat is set: */
521 FS_VEC4_TYPE_INTERP_FP32,
522 FS_VEC4_TYPE_INTERP_FP16,
523 FS_VEC4_TYPE_INTERP_COLOR, /* only for glShadeModel, i.e. INTERP_MODE_NONE */
524 /* When nir_io_has_flexible_input_interpolation_except_flat is not set: */
525 FS_VEC4_TYPE_INTERP_FP32_PERSP_PIXEL,
526 FS_VEC4_TYPE_INTERP_FP32_PERSP_CENTROID,
527 FS_VEC4_TYPE_INTERP_FP32_PERSP_SAMPLE,
528 FS_VEC4_TYPE_INTERP_FP32_LINEAR_PIXEL,
529 FS_VEC4_TYPE_INTERP_FP32_LINEAR_CENTROID,
530 FS_VEC4_TYPE_INTERP_FP32_LINEAR_SAMPLE,
531 FS_VEC4_TYPE_INTERP_FP16_PERSP_PIXEL,
532 FS_VEC4_TYPE_INTERP_FP16_PERSP_CENTROID,
533 FS_VEC4_TYPE_INTERP_FP16_PERSP_SAMPLE,
534 FS_VEC4_TYPE_INTERP_FP16_LINEAR_PIXEL,
535 FS_VEC4_TYPE_INTERP_FP16_LINEAR_CENTROID,
536 FS_VEC4_TYPE_INTERP_FP16_LINEAR_SAMPLE,
537 FS_VEC4_TYPE_INTERP_COLOR_PIXEL, /* only for glShadeModel, i.e. INTERP_MODE_NONE */
538 FS_VEC4_TYPE_INTERP_COLOR_CENTROID, /* same */
539 FS_VEC4_TYPE_INTERP_COLOR_SAMPLE, /* same */
540 };
541
542 enum {
543 PERSP_PIXEL,
544 PERSP_CENTROID,
545 PERSP_SAMPLE,
546 LINEAR_PIXEL,
547 LINEAR_CENTROID,
548 LINEAR_SAMPLE,
549 NUM_INTERP_QUALIFIERS,
550 };
551
552 enum {
553 COLOR_PIXEL,
554 COLOR_CENTROID,
555 COLOR_SAMPLE,
556 NUM_COLOR_QUALIFIERS,
557 };
558
559 #if PRINT_RELOCATE_SLOT
560 static const char *fs_vec4_type_strings[] = {
561 "NONE",
562 "FLAT",
563 "INTERP_EXPLICIT",
564 "INTERP_EXPLICIT_STRICT",
565 "PER_PRIMITIVE",
566 "INTERP_FP32",
567 "INTERP_FP16",
568 "INTERP_COLOR",
569 "INTERP_FP32_PERSP_PIXEL",
570 "INTERP_FP32_PERSP_CENTROID",
571 "INTERP_FP32_PERSP_SAMPLE",
572 "INTERP_FP32_LINEAR_PIXEL",
573 "INTERP_FP32_LINEAR_CENTROID",
574 "INTERP_FP32_LINEAR_SAMPLE",
575 "INTERP_FP16_PERSP_PIXEL",
576 "INTERP_FP16_PERSP_CENTROID",
577 "INTERP_FP16_PERSP_SAMPLE",
578 "INTERP_FP16_LINEAR_PIXEL",
579 "INTERP_FP16_LINEAR_CENTROID",
580 "INTERP_FP16_LINEAR_SAMPLE",
581 "INTERP_COLOR_PIXEL",
582 "INTERP_COLOR_CENTROID",
583 "INTERP_COLOR_SAMPLE",
584 };
585 #endif // PRINT_RELOCATE_SLOT
586
587 typedef BITSET_WORD INTERP_QUAL_BITSET[NUM_INTERP_QUALIFIERS][BITSET_WORDS(NUM_SCALAR_SLOTS)];
588 typedef BITSET_WORD COLOR_QUAL_BITSET[NUM_COLOR_QUALIFIERS][BITSET_WORDS(NUM_SCALAR_SLOTS)];
589
590 static unsigned
591 get_scalar_16bit_slot(nir_io_semantics sem, unsigned component)
592 {
593 return sem.location * 8 + component * 2 + sem.high_16bits;
594 }
595
596 static unsigned
597 intr_get_scalar_16bit_slot(nir_intrinsic_instr *intr)
598 {
599 return get_scalar_16bit_slot(nir_intrinsic_io_semantics(intr),
600 nir_intrinsic_component(intr));
601 }
602
603 static unsigned
604 vec4_slot(unsigned scalar_slot)
605 {
606 return scalar_slot / 8;
607 }
608
609 struct list_node {
610 struct list_head head;
611 nir_intrinsic_instr *instr;
612 };
613
614 /* Information about 1 scalar varying slot for both shader stages. */
615 struct scalar_slot {
616 struct {
617 /* Linked list of all store instructions writing into the scalar slot
618 * in the producer.
619 */
620 struct list_head stores;
621
622 /* Only for TCS: Linked list of all load instructions reading the scalar
623 * slot in the producer.
624 */
625 struct list_head loads;
626
627 /* If there is only one store instruction or if all store instructions
628 * store the same value in the producer, this is the instruction
629 * computing the stored value. Used by constant and uniform propagation
630 * to the next shader.
631 */
632 nir_instr *value;
633 } producer;
634
635 struct {
636 /* Linked list of all load instructions loading from the scalar slot
637 * in the consumer.
638 */
639 struct list_head loads;
640
641 /* The result of TES input interpolation. */
642 nir_alu_instr *tes_interp_load;
643 unsigned tes_interp_mode; /* FLAG_INTERP_TES_* */
644 nir_def *tes_load_tess_coord;
645 } consumer;
646
647 /* The number of accessed slots if this slot has indirect indexing. */
648 unsigned num_slots;
649 };
650
651 struct linkage_info {
652 struct scalar_slot slot[NUM_SCALAR_SLOTS];
653
654 bool spirv;
655 bool can_move_uniforms;
656 bool can_move_ubos;
657 bool can_mix_convergent_flat_with_interpolated;
658 bool has_flexible_interp;
659 bool always_interpolate_convergent_fs_inputs;
660
661 gl_shader_stage producer_stage;
662 gl_shader_stage consumer_stage;
663 nir_builder producer_builder;
664 nir_builder consumer_builder;
665 unsigned max_varying_expression_cost;
666 unsigned (*varying_estimate_instr_cost)(struct nir_instr *instr);
667
668 /* Memory context for linear_alloc_child (fast allocation). */
669 void *linear_mem_ctx;
670
671 /* Hash table for efficient cloning of instructions between shaders. */
672 struct hash_table *clones_ht;
673
674 /* If any component of a vec4 slot is accessed indirectly, this is its
675 * FS vec4 qualifier type, which is either FLAT, FP32, or FP16.
676 * Components with different qualifier types can't be compacted
677 * in the same vec4.
678 */
679 uint8_t fs_vec4_type[NUM_TOTAL_VARYING_SLOTS];
680
681 /* Mask of all varyings that can be removed. Only a few non-VARn non-PATCHn
682 * varyings can't be removed.
683 */
684 BITSET_DECLARE(removable_mask, NUM_SCALAR_SLOTS);
685
686 /* Mask of all slots that have transform feedback info. */
687 BITSET_DECLARE(xfb_mask, NUM_SCALAR_SLOTS);
688
689 /* Mask of all slots that have transform feedback info, but are not used
690 * by the next shader. Separate masks for 32-bit and 16-bit outputs.
691 */
692 BITSET_DECLARE(xfb32_only_mask, NUM_SCALAR_SLOTS);
693 BITSET_DECLARE(xfb16_only_mask, NUM_SCALAR_SLOTS);
694
695 /* Mask of all TCS inputs using cross-invocation access. */
696 BITSET_DECLARE(tcs_cross_invoc32_mask, NUM_SCALAR_SLOTS);
697 BITSET_DECLARE(tcs_cross_invoc16_mask, NUM_SCALAR_SLOTS);
698
699 /* Mask of all TCS->TES slots that are read by TCS, but not TES. */
700 BITSET_DECLARE(no_varying32_mask, NUM_SCALAR_SLOTS);
701 BITSET_DECLARE(no_varying16_mask, NUM_SCALAR_SLOTS);
702
703 /* Mask of all slots accessed with indirect indexing. */
704 BITSET_DECLARE(indirect_mask, NUM_SCALAR_SLOTS);
705
706 /* The following masks only contain slots that can be compacted and
707 * describe the groups in which they should be compacted. Non-fragment
708 * shaders only use the flat bitmasks.
709 *
710 * Some legacy varyings are excluded when they can't be compacted due to
711 * being affected by pipeline states (like coord replace). That only
712 * applies to xx->FS shader pairs. Other shader pairs get all legacy
713 * varyings compacted and relocated to VARn.
714 *
715 * Indirectly-indexed varyings are also excluded because they are not
716 * compacted.
717 */
718 BITSET_DECLARE(interp_fp32_mask, NUM_SCALAR_SLOTS);
719 BITSET_DECLARE(interp_fp16_mask, NUM_SCALAR_SLOTS);
720 BITSET_DECLARE(flat32_mask, NUM_SCALAR_SLOTS);
721 BITSET_DECLARE(flat16_mask, NUM_SCALAR_SLOTS);
722 BITSET_DECLARE(interp_explicit32_mask, NUM_SCALAR_SLOTS);
723 BITSET_DECLARE(interp_explicit16_mask, NUM_SCALAR_SLOTS);
724 BITSET_DECLARE(interp_explicit_strict32_mask, NUM_SCALAR_SLOTS);
725 BITSET_DECLARE(interp_explicit_strict16_mask, NUM_SCALAR_SLOTS);
726 BITSET_DECLARE(per_primitive32_mask, NUM_SCALAR_SLOTS);
727 BITSET_DECLARE(per_primitive16_mask, NUM_SCALAR_SLOTS);
728
729 /* Color interpolation unqualified (follows the flat-shade state). */
730 BITSET_DECLARE(color32_mask, NUM_SCALAR_SLOTS);
731
732 /* A separate bitmask for each qualifier when
733 * nir_io_has_flexible_input_interpolation_except_flat is not set.
734 */
735 INTERP_QUAL_BITSET interp_fp32_qual_masks;
736 INTERP_QUAL_BITSET interp_fp16_qual_masks;
737 COLOR_QUAL_BITSET color32_qual_masks;
738
739 /* Mask of output components that have only one store instruction, or if
740 * they have multiple store instructions, all those instructions store
741 * the same value. If the output has multiple vertices, all vertices store
742 * the same value. This is a useful property for:
743 * - constant and uniform propagation to the next shader
744 * - deduplicating outputs
745 */
746 BITSET_DECLARE(output_equal_mask, NUM_SCALAR_SLOTS);
747
748 /* Mask of output components that store values that are convergent,
749 * i.e. all values stored into the outputs are equal within a primitive.
750 *
751 * This is different from output_equal_mask, which says that all stores
752 * to the same slot in the same thread are equal, while this says that
753 * each store to the same slot can be different, but it always stores
754 * a convergent value, which means the stored value is equal among all
755 * threads within a primitive.
756 *
757 * The advantage is that these varyings can always be promoted to flat
758 * regardless of the original interpolation mode, and they can always be
759 * compacted with both interpolated and flat varyings.
760 */
761 BITSET_DECLARE(convergent32_mask, NUM_SCALAR_SLOTS);
762 BITSET_DECLARE(convergent16_mask, NUM_SCALAR_SLOTS);
763 };
764
765 /******************************************************************
766 * HELPERS
767 ******************************************************************/
768
769 /* Return whether the low or high 16-bit slot is 1. */
770 #define BITSET_TEST32(m, b) \
771 (BITSET_TEST(m, (b) & ~0x1) || BITSET_TEST(m, ((b) & ~0x1) + 1))
772
773 #define BITSET3_TEST_ANY(bitsets, b) (BITSET_TEST((bitsets)[0], (b)) || \
774 BITSET_TEST((bitsets)[1], (b)) || \
775 BITSET_TEST((bitsets)[2], (b)))
776 #define BITSET6_TEST_ANY(bitsets, b) (BITSET3_TEST_ANY((bitsets), (b)) || \
777 BITSET3_TEST_ANY(&(bitsets)[3], (b)))
778
779 static void
780 print_linkage(struct linkage_info *linkage)
781 {
782 printf("Linkage: %s -> %s\n",
783 _mesa_shader_stage_to_abbrev(linkage->producer_stage),
784 _mesa_shader_stage_to_abbrev(linkage->consumer_stage));
785
786 for (unsigned i = 0; i < NUM_SCALAR_SLOTS; i++) {
787 struct scalar_slot *slot = &linkage->slot[i];
788
789 if (!slot->num_slots &&
790 list_is_empty(&slot->producer.stores) &&
791 list_is_empty(&slot->producer.loads) &&
792 list_is_empty(&slot->consumer.loads) &&
793 !BITSET_TEST(linkage->removable_mask, i) &&
794 !BITSET_TEST(linkage->indirect_mask, i) &&
795 !BITSET_TEST(linkage->xfb32_only_mask, i) &&
796 !BITSET_TEST(linkage->xfb16_only_mask, i) &&
797 !BITSET_TEST(linkage->tcs_cross_invoc32_mask, i) &&
798 !BITSET_TEST(linkage->tcs_cross_invoc16_mask, i) &&
799 !BITSET_TEST(linkage->no_varying32_mask, i) &&
800 !BITSET_TEST(linkage->no_varying16_mask, i) &&
801 !BITSET_TEST(linkage->interp_fp32_mask, i) &&
802 !BITSET_TEST(linkage->interp_fp16_mask, i) &&
803 !BITSET6_TEST_ANY(linkage->interp_fp32_qual_masks, i) &&
804 !BITSET6_TEST_ANY(linkage->interp_fp16_qual_masks, i) &&
805 !BITSET_TEST(linkage->color32_mask, i) &&
806 !BITSET3_TEST_ANY(linkage->color32_qual_masks, i) &&
807 !BITSET_TEST(linkage->flat32_mask, i) &&
808 !BITSET_TEST(linkage->flat16_mask, i) &&
809 !BITSET_TEST(linkage->interp_explicit32_mask, i) &&
810 !BITSET_TEST(linkage->interp_explicit16_mask, i) &&
811 !BITSET_TEST(linkage->interp_explicit_strict32_mask, i) &&
812 !BITSET_TEST(linkage->interp_explicit_strict16_mask, i) &&
813 !BITSET_TEST(linkage->per_primitive32_mask, i) &&
814 !BITSET_TEST(linkage->per_primitive16_mask, i) &&
815 !BITSET_TEST(linkage->convergent32_mask, i) &&
816 !BITSET_TEST(linkage->convergent16_mask, i) &&
817 !BITSET_TEST(linkage->output_equal_mask, i))
818 continue;
819
820 printf(" %7s.%c.%s: num_slots=%2u%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
821 gl_varying_slot_name_for_stage(vec4_slot(i),
822 linkage->producer_stage) + 13,
823 "xyzw"[(i / 2) % 4],
824 i % 2 ? "hi" : "lo",
825 slot->num_slots,
826 BITSET_TEST(linkage->removable_mask, i) ? " removable" : "",
827 BITSET_TEST(linkage->indirect_mask, i) ? " indirect" : "",
828 BITSET_TEST(linkage->xfb32_only_mask, i) ? " xfb32_only" : "",
829 BITSET_TEST(linkage->xfb16_only_mask, i) ? " xfb16_only" : "",
830 BITSET_TEST(linkage->tcs_cross_invoc32_mask, i) ? " tcs_cross_invoc32" : "",
831 BITSET_TEST(linkage->tcs_cross_invoc16_mask, i) ? " tcs_cross_invoc16" : "",
832 BITSET_TEST(linkage->no_varying32_mask, i) ? " no_varying32" : "",
833 BITSET_TEST(linkage->no_varying16_mask, i) ? " no_varying16" : "",
834 BITSET_TEST(linkage->interp_fp32_mask, i) ? " interp_fp32" : "",
835 BITSET_TEST(linkage->interp_fp32_qual_masks[0], i) ? " interp_fp32_persp_pixel" : "",
836 BITSET_TEST(linkage->interp_fp32_qual_masks[1], i) ? " interp_fp32_persp_centroid" : "",
837 BITSET_TEST(linkage->interp_fp32_qual_masks[2], i) ? " interp_fp32_persp_sample" : "",
838 BITSET_TEST(linkage->interp_fp32_qual_masks[3], i) ? " interp_fp32_linear_pixel" : "",
839 BITSET_TEST(linkage->interp_fp32_qual_masks[4], i) ? " interp_fp32_linear_centroid" : "",
840 BITSET_TEST(linkage->interp_fp32_qual_masks[5], i) ? " interp_fp32_linear_sample" : "",
841 BITSET_TEST(linkage->interp_fp16_mask, i) ? " interp_fp16" : "",
842 BITSET_TEST(linkage->interp_fp16_qual_masks[0], i) ? " interp_fp16_persp_pixel" : "",
843 BITSET_TEST(linkage->interp_fp16_qual_masks[1], i) ? " interp_fp16_persp_centroid" : "",
844 BITSET_TEST(linkage->interp_fp16_qual_masks[2], i) ? " interp_fp16_persp_sample" : "",
845 BITSET_TEST(linkage->interp_fp16_qual_masks[3], i) ? " interp_fp16_linear_pixel" : "",
846 BITSET_TEST(linkage->interp_fp16_qual_masks[4], i) ? " interp_fp16_linear_centroid" : "",
847 BITSET_TEST(linkage->interp_fp16_qual_masks[5], i) ? " interp_fp16_linear_sample" : "",
848 BITSET_TEST(linkage->color32_mask, i) ? " color32" : "",
849 BITSET_TEST(linkage->color32_qual_masks[0], i) ? " color32_pixel" : "",
850 BITSET_TEST(linkage->color32_qual_masks[1], i) ? " color32_centroid" : "",
851 BITSET_TEST(linkage->color32_qual_masks[2], i) ? " color32_sample" : "",
852 BITSET_TEST(linkage->flat32_mask, i) ? " flat32" : "",
853 BITSET_TEST(linkage->flat16_mask, i) ? " flat16" : "",
854 BITSET_TEST(linkage->interp_explicit32_mask, i) ? " interp_explicit32" : "",
855 BITSET_TEST(linkage->interp_explicit16_mask, i) ? " interp_explicit16" : "",
856 BITSET_TEST(linkage->interp_explicit_strict32_mask, i) ? " interp_explicit_strict32" : "",
857 BITSET_TEST(linkage->interp_explicit_strict16_mask, i) ? " interp_explicit_strict16" : "",
858 BITSET_TEST(linkage->per_primitive32_mask, i) ? " per_primitive32" : "",
859 BITSET_TEST(linkage->per_primitive16_mask, i) ? " per_primitive16" : "",
860 BITSET_TEST(linkage->convergent32_mask, i) ? " convergent32" : "",
861 BITSET_TEST(linkage->convergent16_mask, i) ? " convergent16" : "",
862 BITSET_TEST(linkage->output_equal_mask, i) ? " output_equal" : "",
863 !list_is_empty(&slot->producer.stores) ? " producer_stores" : "",
864 !list_is_empty(&slot->producer.loads) ? " producer_loads" : "",
865 !list_is_empty(&slot->consumer.loads) ? " consumer_loads" : "");
866 }
867 }
868
869 static void
870 slot_disable_optimizations_and_compaction(struct linkage_info *linkage,
871 unsigned i)
872 {
873 BITSET_CLEAR(linkage->output_equal_mask, i);
874 BITSET_CLEAR(linkage->convergent32_mask, i);
875 BITSET_CLEAR(linkage->convergent16_mask, i);
876 BITSET_CLEAR(linkage->interp_fp32_mask, i);
877 BITSET_CLEAR(linkage->interp_fp16_mask, i);
878 for (unsigned b = 0; b < NUM_INTERP_QUALIFIERS; b++) {
879 BITSET_CLEAR(linkage->interp_fp32_qual_masks[b], i);
880 BITSET_CLEAR(linkage->interp_fp16_qual_masks[b], i);
881 }
882 BITSET_CLEAR(linkage->flat32_mask, i);
883 BITSET_CLEAR(linkage->flat16_mask, i);
884 BITSET_CLEAR(linkage->interp_explicit32_mask, i);
885 BITSET_CLEAR(linkage->interp_explicit16_mask, i);
886 BITSET_CLEAR(linkage->interp_explicit_strict32_mask, i);
887 BITSET_CLEAR(linkage->interp_explicit_strict16_mask, i);
888 BITSET_CLEAR(linkage->per_primitive32_mask, i);
889 BITSET_CLEAR(linkage->per_primitive16_mask, i);
890 BITSET_CLEAR(linkage->tcs_cross_invoc32_mask, i);
891 BITSET_CLEAR(linkage->tcs_cross_invoc16_mask, i);
892 BITSET_CLEAR(linkage->no_varying32_mask, i);
893 BITSET_CLEAR(linkage->no_varying16_mask, i);
894 BITSET_CLEAR(linkage->color32_mask, i);
895 for (unsigned b = 0; b < NUM_COLOR_QUALIFIERS; b++)
896 BITSET_CLEAR(linkage->color32_qual_masks[b], i);
897 }
898
899 static void
900 clear_slot_info_after_removal(struct linkage_info *linkage, unsigned i, bool uses_xfb)
901 {
902 slot_disable_optimizations_and_compaction(linkage, i);
903
904 if (uses_xfb)
905 return;
906
907 linkage->slot[i].num_slots = 0;
908
909 BITSET_CLEAR(linkage->indirect_mask, i);
910 BITSET_CLEAR(linkage->removable_mask, i);
911
912 /* Transform feedback stores can't be removed. */
913 assert(!BITSET_TEST(linkage->xfb32_only_mask, i));
914 assert(!BITSET_TEST(linkage->xfb16_only_mask, i));
915 }
916
917 static bool
918 has_xfb(nir_intrinsic_instr *intr)
919 {
920 /* This means whether the intrinsic is ABLE to have xfb info. */
921 if (!nir_intrinsic_has_io_xfb(intr))
922 return false;
923
924 unsigned comp = nir_intrinsic_component(intr);
925
926 if (comp >= 2)
927 return nir_intrinsic_io_xfb2(intr).out[comp - 2].num_components > 0;
928 else
929 return nir_intrinsic_io_xfb(intr).out[comp].num_components > 0;
930 }
931
932 static bool
933 is_interpolated_color(struct linkage_info *linkage, unsigned i)
934 {
935 if (linkage->consumer_stage != MESA_SHADER_FRAGMENT)
936 return false;
937
938 /* BFCn stores are bunched in the COLn slots with COLn, so we should never
939 * get BFCn here.
940 */
941 assert(vec4_slot(i) != VARYING_SLOT_BFC0 &&
942 vec4_slot(i) != VARYING_SLOT_BFC1);
943
944 return vec4_slot(i) == VARYING_SLOT_COL0 ||
945 vec4_slot(i) == VARYING_SLOT_COL1;
946 }
947
948 static bool
949 is_interpolated_texcoord(struct linkage_info *linkage, unsigned i)
950 {
951 if (linkage->consumer_stage != MESA_SHADER_FRAGMENT)
952 return false;
953
954 return vec4_slot(i) >= VARYING_SLOT_TEX0 &&
955 vec4_slot(i) <= VARYING_SLOT_TEX7;
956 }
957
958 static bool
959 color_uses_shade_model(struct linkage_info *linkage, unsigned i)
960 {
961 if (!is_interpolated_color(linkage, i))
962 return false;
963
964 list_for_each_entry(struct list_node, iter,
965 &linkage->slot[i].consumer.loads, head) {
966 assert(iter->instr->intrinsic == nir_intrinsic_load_interpolated_input);
967
968 nir_intrinsic_instr *baryc =
969 nir_instr_as_intrinsic(iter->instr->src[0].ssa->parent_instr);
970 if (nir_intrinsic_interp_mode(baryc) == INTERP_MODE_NONE)
971 return true;
972 }
973
974 return false;
975 }
976
977 static enum fs_vec4_type
978 get_interp_vec4_type(struct linkage_info *linkage, unsigned slot,
979 nir_intrinsic_instr *load)
980 {
981 assert(!linkage->has_flexible_interp);
982 assert(load->intrinsic == nir_intrinsic_load_interpolated_input);
983
984 nir_intrinsic_instr *baryc =
985 nir_instr_as_intrinsic(load->src[0].ssa->parent_instr);
986 enum fs_vec4_type base;
987
988 if (color_uses_shade_model(linkage, slot))
989 base = FS_VEC4_TYPE_INTERP_COLOR_PIXEL;
990 else if (load->def.bit_size == 32)
991 base = FS_VEC4_TYPE_INTERP_FP32_PERSP_PIXEL;
992 else if (load->def.bit_size == 16)
993 base = FS_VEC4_TYPE_INTERP_FP16_PERSP_PIXEL;
994 else
995 unreachable("invalid load_interpolated_input type");
996
997 bool linear = nir_intrinsic_interp_mode(baryc) == INTERP_MODE_NOPERSPECTIVE;
998
999 if (linear)
1000 base += 3;
1001
1002 switch (baryc->intrinsic) {
1003 case nir_intrinsic_load_barycentric_pixel:
1004 case nir_intrinsic_load_barycentric_at_offset:
1005 case nir_intrinsic_load_barycentric_at_sample:
1006 return base;
1007 case nir_intrinsic_load_barycentric_centroid:
1008 return base + 1;
1009 case nir_intrinsic_load_barycentric_sample:
1010 return base + 2;
1011 default:
1012 unreachable("unexpected barycentric intrinsic");
1013 }
1014 }
1015
1016 static bool
1017 preserve_infs_nans(nir_shader *nir, unsigned bit_size)
1018 {
1019 unsigned mode = nir->info.float_controls_execution_mode;
1020
1021 return nir_is_float_control_inf_preserve(mode, bit_size) ||
1022 nir_is_float_control_nan_preserve(mode, bit_size);
1023 }
1024
1025 static bool
1026 preserve_nans(nir_shader *nir, unsigned bit_size)
1027 {
1028 unsigned mode = nir->info.float_controls_execution_mode;
1029
1030 return nir_is_float_control_nan_preserve(mode, bit_size);
1031 }
1032
1033 static nir_def *
1034 build_convert_inf_to_nan(nir_builder *b, nir_def *x)
1035 {
1036 /* Do x*0 + x. The multiplication by 0 can't be optimized out. */
1037 nir_def *fma = nir_ffma_imm1(b, x, 0, x);
1038 nir_instr_as_alu(fma->parent_instr)->exact = true;
1039 return fma;
1040 }
1041
1042 static bool
1043 is_sysval(nir_instr *instr, gl_system_value sysval)
1044 {
1045 if (instr->type == nir_instr_type_intrinsic) {
1046 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1047
1048 if (intr->intrinsic == nir_intrinsic_from_system_value(sysval))
1049 return true;
1050
1051 if (intr->intrinsic == nir_intrinsic_load_deref) {
1052 nir_deref_instr *deref =
1053 nir_instr_as_deref(intr->src[0].ssa->parent_instr);
1054
1055 return nir_deref_mode_is_one_of(deref, nir_var_system_value) &&
1056 nir_deref_instr_get_variable(deref)->data.location == sysval;
1057 }
1058 }
1059
1060 return false;
1061 }
1062
1063 /******************************************************************
1064 * GATHERING INPUTS & OUTPUTS
1065 ******************************************************************/
1066
1067 static bool
1068 is_active_sysval_output(struct linkage_info *linkage, unsigned slot,
1069 nir_intrinsic_instr *intr)
1070 {
1071 return nir_slot_is_sysval_output(vec4_slot(slot),
1072 linkage->consumer_stage) &&
1073 !nir_intrinsic_io_semantics(intr).no_sysval_output;
1074 }
1075
1076 /**
1077 * This function acts like a filter. The pass won't touch varyings that
1078 * return false here, and the return value is saved in the linkage bitmasks,
1079 * so that all subpasses will *automatically* skip such varyings.
1080 */
1081 static bool
1082 can_remove_varying(struct linkage_info *linkage, gl_varying_slot location)
1083 {
1084 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
1085 /* User-defined varyings and fog coordinates can always be removed. */
1086 if (location >= VARYING_SLOT_VAR0 ||
1087 location == VARYING_SLOT_FOGC)
1088 return true;
1089
1090 /* These can be removed as varyings, which means they will be demoted to
1091 * sysval-only outputs keeping their culling/rasterization functions
1092 * while not passing the values to FS. Drivers should handle
1093 * the "no_varying" semantic to benefit from this.
1094 *
1095 * Note: When removing unset LAYER and VIEWPORT FS inputs, they will
1096 * be replaced by 0 instead of undef.
1097 */
1098 if (location == VARYING_SLOT_CLIP_DIST0 ||
1099 location == VARYING_SLOT_CLIP_DIST1 ||
1100 location == VARYING_SLOT_CULL_DIST0 ||
1101 location == VARYING_SLOT_CULL_DIST1 ||
1102 location == VARYING_SLOT_LAYER ||
1103 location == VARYING_SLOT_VIEWPORT)
1104 return true;
1105
1106 /* COLn inputs can be removed only if both COLn and BFCn are not
1107 * written. Both COLn and BFCn outputs can be removed if COLn inputs
1108 * aren't read.
1109 *
1110 * TEXn inputs can never be removed in FS because of the coord replace
1111 * state, but TEXn outputs can be removed if they are not read by FS.
1112 */
1113 if (location == VARYING_SLOT_COL0 ||
1114 location == VARYING_SLOT_COL1 ||
1115 location == VARYING_SLOT_BFC0 ||
1116 location == VARYING_SLOT_BFC1 ||
1117 (location >= VARYING_SLOT_TEX0 && location <= VARYING_SLOT_TEX7))
1118 return true;
1119
1120 /* "GS/MS -> FS" can remove the primitive ID if not written or not read. */
1121 if ((linkage->producer_stage == MESA_SHADER_GEOMETRY ||
1122 linkage->producer_stage == MESA_SHADER_MESH) &&
1123 location == VARYING_SLOT_PRIMITIVE_ID)
1124 return true;
1125
1126 /* No other varyings can be removed. */
1127 return false;
1128 } else if (linkage->consumer_stage == MESA_SHADER_TESS_EVAL) {
1129 /* Only VS->TES shouldn't remove TESS_LEVEL_* inputs because the values
1130 * come from glPatchParameterfv.
1131 *
1132 * For TCS->TES, TESS_LEVEL_* outputs can be removed as varyings, which
1133 * means they will be demoted to sysval-only outputs, so that drivers
1134 * know that TES doesn't read them.
1135 */
1136 if (linkage->producer_stage == MESA_SHADER_VERTEX &&
1137 (location == VARYING_SLOT_TESS_LEVEL_INNER ||
1138 location == VARYING_SLOT_TESS_LEVEL_OUTER))
1139 return false;
1140
1141 return true;
1142 }
1143
1144 /* All other varyings can be removed. */
1145 return true;
1146 }
1147
1148 struct opt_options {
1149 bool propagate_uniform_expr:1;
1150 bool deduplicate:1;
1151 bool inter_shader_code_motion:1;
1152 bool compact:1;
1153 bool disable_all:1;
1154 };
1155
1156 /**
1157 * Return which optimizations are allowed.
1158 */
1159 static struct opt_options
1160 can_optimize_varying(struct linkage_info *linkage, gl_varying_slot location)
1161 {
1162 struct opt_options options_var = {
1163 .propagate_uniform_expr = true,
1164 .deduplicate = true,
1165 .inter_shader_code_motion = true,
1166 .compact = true,
1167 };
1168 struct opt_options options_color = {
1169 .propagate_uniform_expr = true, /* only constants in [0, 1] */
1170 .deduplicate = true,
1171 .compact = true,
1172 };
1173 struct opt_options options_tex = {
1174 .propagate_uniform_expr = true, /* only TEX.zw if equal to (0, 1) */
1175 };
1176 struct opt_options options_sysval_output = {
1177 .propagate_uniform_expr = true,
1178 .deduplicate = true,
1179 };
1180 struct opt_options options_tess_levels = {
1181 .propagate_uniform_expr = true,
1182 .deduplicate = true,
1183 };
1184 struct opt_options options_disable_all = {
1185 .disable_all = true,
1186 };
1187
1188 assert(can_remove_varying(linkage, location));
1189
1190 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
1191 /* xx -> FS */
1192 /* User-defined varyings and fog coordinates can always be optimized. */
1193 if (location >= VARYING_SLOT_VAR0 ||
1194 location == VARYING_SLOT_FOGC)
1195 return options_var;
1196
1197 /* The primitive ID can always be optimized in GS -> FS and MS -> FS. */
1198 if ((linkage->producer_stage == MESA_SHADER_GEOMETRY ||
1199 linkage->producer_stage == MESA_SHADER_MESH) &&
1200 location == VARYING_SLOT_PRIMITIVE_ID)
1201 return options_var;
1202
1203 /* Colors can only do constant propagation if COLn and BFCn store the
1204 * same constant and the constant is between 0 and 1 (because clamp
1205 * vertex color state is unknown). Uniform propagation isn't possible
1206 * because of the clamping.
1207 *
1208 * Color components can only be deduplicated and compacted among
1209 * themselves if they have the same interpolation qualifier, and can't
1210 * be mixed with other varyings.
1211 */
1212 if (location == VARYING_SLOT_COL0 ||
1213 location == VARYING_SLOT_COL1 ||
1214 location == VARYING_SLOT_BFC0 ||
1215 location == VARYING_SLOT_BFC1)
1216 return options_color;
1217
1218 /* TEXn.zw can only be constant-propagated if the value is (0, 1)
1219 * because it matches the coord replace values.
1220 */
1221 if (location >= VARYING_SLOT_TEX0 && location <= VARYING_SLOT_TEX7)
1222 return options_tex;
1223
1224 /* LAYER, VIEWPORT, CLIP_DISTn, and CULL_DISTn can only propagate
1225 * uniform expressions and be compacted (moved to VARn while keeping
1226 * the sysval outputs where they are).
1227 */
1228 if (location == VARYING_SLOT_LAYER ||
1229 location == VARYING_SLOT_VIEWPORT ||
1230 location == VARYING_SLOT_CLIP_DIST0 ||
1231 location == VARYING_SLOT_CLIP_DIST1 ||
1232 location == VARYING_SLOT_CULL_DIST0 ||
1233 location == VARYING_SLOT_CULL_DIST1)
1234 return options_sysval_output;
1235
1236 /* Everything else can't be read by the consumer, such as POS, PSIZ,
1237 * CLIP_VERTEX, EDGE, PRIMITIVE_SHADING_RATE, etc.
1238 */
1239 return options_disable_all;
1240 }
1241
1242 if (linkage->producer_stage == MESA_SHADER_TESS_CTRL) {
1243 /* TESS_LEVEL_* can only propagate uniform expressions.
1244 * Compaction is disabled because AMD doesn't want the varying to be
1245 * moved to PATCHn while keeping the sysval output where it is.
1246 */
1247 if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
1248 location == VARYING_SLOT_TESS_LEVEL_OUTER)
1249 return options_tess_levels;
1250 }
1251
1252 /* All other shader pairs, which are (VS, TCS), (TCS, TES), (VS, TES),
1253 * (TES, GS), and (VS, GS) can compact and optimize all varyings.
1254 */
1255 return options_var;
1256 }
1257
1258 static bool
1259 gather_inputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_data)
1260 {
1261 struct linkage_info *linkage = (struct linkage_info *)cb_data;
1262
1263 if (intr->intrinsic != nir_intrinsic_load_input &&
1264 intr->intrinsic != nir_intrinsic_load_per_vertex_input &&
1265 intr->intrinsic != nir_intrinsic_load_per_primitive_input &&
1266 intr->intrinsic != nir_intrinsic_load_interpolated_input &&
1267 intr->intrinsic != nir_intrinsic_load_input_vertex)
1268 return false;
1269
1270 /* nir_lower_io_to_scalar is required before this */
1271 assert(intr->def.num_components == 1);
1272 /* Non-zero constant offsets should have been folded by
1273 * nir_io_add_const_offset_to_base.
1274 */
1275 nir_src offset = *nir_get_io_offset_src(intr);
1276 assert(!nir_src_is_const(offset) || nir_src_as_uint(offset) == 0);
1277
1278 nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
1279
1280 if (!can_remove_varying(linkage, sem.location))
1281 return false;
1282
1283 /* Insert the load into the list of loads for this scalar slot. */
1284 unsigned slot = intr_get_scalar_16bit_slot(intr);
1285 struct scalar_slot *in = &linkage->slot[slot];
1286 struct list_node *node = linear_alloc_child(linkage->linear_mem_ctx,
1287 sizeof(struct list_node));
1288 node->instr = intr;
1289 list_addtail(&node->head, &in->consumer.loads);
1290 in->num_slots = MAX2(in->num_slots, sem.num_slots);
1291
1292 BITSET_SET(linkage->removable_mask, slot);
1293
1294 enum fs_vec4_type fs_vec4_type = FS_VEC4_TYPE_NONE;
1295
1296 /* Determine the type of the input for compaction. Other inputs
1297 * can be compacted with indirectly-indexed vec4 slots if they
1298 * have unused components, but only if they are of the same type.
1299 */
1300 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
1301 switch (intr->intrinsic) {
1302 case nir_intrinsic_load_input:
1303 fs_vec4_type = FS_VEC4_TYPE_FLAT;
1304 break;
1305 case nir_intrinsic_load_per_primitive_input:
1306 fs_vec4_type = FS_VEC4_TYPE_PER_PRIMITIVE;
1307 break;
1308 case nir_intrinsic_load_input_vertex:
1309 if (sem.interp_explicit_strict)
1310 fs_vec4_type = FS_VEC4_TYPE_INTERP_EXPLICIT_STRICT;
1311 else
1312 fs_vec4_type = FS_VEC4_TYPE_INTERP_EXPLICIT;
1313 break;
1314 case nir_intrinsic_load_interpolated_input:
1315 if (linkage->has_flexible_interp) {
1316 if (color_uses_shade_model(linkage, slot))
1317 fs_vec4_type = FS_VEC4_TYPE_INTERP_COLOR;
1318 else if (intr->def.bit_size == 32)
1319 fs_vec4_type = FS_VEC4_TYPE_INTERP_FP32;
1320 else if (intr->def.bit_size == 16)
1321 fs_vec4_type = FS_VEC4_TYPE_INTERP_FP16;
1322 else
1323 unreachable("invalid load_interpolated_input type");
1324 } else {
1325 fs_vec4_type = get_interp_vec4_type(linkage, slot, intr);
1326 }
1327 break;
1328 default:
1329 unreachable("unexpected input load intrinsic");
1330 }
1331
1332 linkage->fs_vec4_type[sem.location] = fs_vec4_type;
1333 }
1334
1335 /* Indirect indexing. */
1336 if (!nir_src_is_const(offset)) {
1337 /* Only the indirectly-indexed component is marked as indirect. */
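/* Scalar slots are tracked at 16-bit granularity: each vec4 slot occupies
 * 8 scalar slots (low/high halves of x, y, z, w), which is why stepping to
 * the same component in the next vec4 slot adds 8 below.
 */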
1338 for (unsigned i = 0; i < sem.num_slots; i++)
1339 BITSET_SET(linkage->indirect_mask, slot + i * 8);
1340
1341 /* Set the same vec4 type as the first element in all slots. */
1342 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
1343 for (unsigned i = 1; i < sem.num_slots; i++)
1344 linkage->fs_vec4_type[sem.location + i] = fs_vec4_type;
1345 }
1346 return false;
1347 }
1348
1349 if (!can_optimize_varying(linkage, sem.location).compact)
1350 return false;
1351
1352 /* Record inputs that can be compacted. */
1353 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
1354 unsigned i;
1355 assert(intr->def.bit_size == 32 || intr->def.bit_size == 16);
1356
1357 switch (fs_vec4_type) {
1358 case FS_VEC4_TYPE_FLAT:
1359 if (intr->def.bit_size == 32)
1360 BITSET_SET(linkage->flat32_mask, slot);
1361 else
1362 BITSET_SET(linkage->flat16_mask, slot);
1363 break;
1364 case FS_VEC4_TYPE_INTERP_EXPLICIT:
1365 if (intr->def.bit_size == 32)
1366 BITSET_SET(linkage->interp_explicit32_mask, slot);
1367 else
1368 BITSET_SET(linkage->interp_explicit16_mask, slot);
1369 break;
1370 case FS_VEC4_TYPE_INTERP_EXPLICIT_STRICT:
1371 if (intr->def.bit_size == 32)
1372 BITSET_SET(linkage->interp_explicit_strict32_mask, slot);
1373 else
1374 BITSET_SET(linkage->interp_explicit_strict16_mask, slot);
1375 break;
1376 case FS_VEC4_TYPE_PER_PRIMITIVE:
1377 if (intr->def.bit_size == 32)
1378 BITSET_SET(linkage->per_primitive32_mask, slot);
1379 else
1380 BITSET_SET(linkage->per_primitive16_mask, slot);
1381 break;
1382
1383 case FS_VEC4_TYPE_INTERP_FP32:
1384 BITSET_SET(linkage->interp_fp32_mask, slot);
1385 break;
1386 case FS_VEC4_TYPE_INTERP_FP16:
1387 BITSET_SET(linkage->interp_fp16_mask, slot);
1388 break;
1389 case FS_VEC4_TYPE_INTERP_COLOR:
1390 BITSET_SET(linkage->color32_mask, slot);
1391 break;
1392
1393 case FS_VEC4_TYPE_INTERP_FP32_PERSP_PIXEL:
1394 case FS_VEC4_TYPE_INTERP_FP32_PERSP_CENTROID:
1395 case FS_VEC4_TYPE_INTERP_FP32_PERSP_SAMPLE:
1396 case FS_VEC4_TYPE_INTERP_FP32_LINEAR_PIXEL:
1397 case FS_VEC4_TYPE_INTERP_FP32_LINEAR_CENTROID:
1398 case FS_VEC4_TYPE_INTERP_FP32_LINEAR_SAMPLE:
1399 i = fs_vec4_type - FS_VEC4_TYPE_INTERP_FP32_PERSP_PIXEL;
1400 BITSET_SET(linkage->interp_fp32_qual_masks[i], slot);
1401 break;
1402
1403 case FS_VEC4_TYPE_INTERP_FP16_PERSP_PIXEL:
1404 case FS_VEC4_TYPE_INTERP_FP16_PERSP_CENTROID:
1405 case FS_VEC4_TYPE_INTERP_FP16_PERSP_SAMPLE:
1406 case FS_VEC4_TYPE_INTERP_FP16_LINEAR_PIXEL:
1407 case FS_VEC4_TYPE_INTERP_FP16_LINEAR_CENTROID:
1408 case FS_VEC4_TYPE_INTERP_FP16_LINEAR_SAMPLE:
1409 i = fs_vec4_type - FS_VEC4_TYPE_INTERP_FP16_PERSP_PIXEL;
1410 BITSET_SET(linkage->interp_fp16_qual_masks[i], slot);
1411 break;
1412
1413 case FS_VEC4_TYPE_INTERP_COLOR_PIXEL:
1414 case FS_VEC4_TYPE_INTERP_COLOR_CENTROID:
1415 case FS_VEC4_TYPE_INTERP_COLOR_SAMPLE:
1416 i = fs_vec4_type - FS_VEC4_TYPE_INTERP_COLOR_PIXEL;
1417 BITSET_SET(linkage->color32_qual_masks[i], slot);
1418 break;
1419
1420 case FS_VEC4_TYPE_NONE:
1421 unreachable("unexpected fs_vec4_type");
1422 }
1423
1424 if (!linkage->has_flexible_interp &&
1425 intr->intrinsic == nir_intrinsic_load_interpolated_input) {
1426 /* interpolateAtCentroid can occur simultaneously with any other
1427 * qualifier. If centroid is flagged with any other qualifier,
1428 * unflag centroid. Even though we track such outputs as the other
1429 * qualifier, the load_barycentric_centroid intrinsic must be
1430 * preserved by all optimizations. The only case when it's not
1431 * preserved is when the input is convergent, in which case
1432 * all qualifiers have the same behavior and we opportunistically
1433 * change it during compaction.
1434 */
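/* For example, if a slot is loaded with both load_barycentric_centroid
 * (from interpolateAtCentroid) and load_barycentric_pixel, it's tracked
 * under the pixel qualifier here, while the centroid intrinsic itself
 * stays in the shader.
 */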
1435 if (color_uses_shade_model(linkage, slot)) {
1436 if (BITSET_TEST(linkage->color32_qual_masks[COLOR_CENTROID], slot) &&
1437 (BITSET_TEST(linkage->color32_qual_masks[COLOR_PIXEL], slot) ||
1438 BITSET_TEST(linkage->color32_qual_masks[COLOR_SAMPLE], slot)))
1439 BITSET_CLEAR(linkage->color32_qual_masks[COLOR_CENTROID], slot);
1440 } else {
1441 INTERP_QUAL_BITSET *bitsets =
1442 intr->def.bit_size == 32 ? &linkage->interp_fp32_qual_masks :
1443 &linkage->interp_fp16_qual_masks;
1444
1445 if (BITSET_TEST((*bitsets)[PERSP_CENTROID], slot) &&
1446 (BITSET_TEST((*bitsets)[PERSP_PIXEL], slot) ||
1447 BITSET_TEST((*bitsets)[PERSP_SAMPLE], slot)))
1448 BITSET_CLEAR((*bitsets)[PERSP_CENTROID], slot);
1449
1450 if (BITSET_TEST((*bitsets)[LINEAR_CENTROID], slot) &&
1451 (BITSET_TEST((*bitsets)[LINEAR_PIXEL], slot) ||
1452 BITSET_TEST((*bitsets)[LINEAR_SAMPLE], slot)))
1453 BITSET_CLEAR((*bitsets)[LINEAR_CENTROID], slot);
1454 }
1455 }
1456 } else {
1457 if (intr->def.bit_size == 32)
1458 BITSET_SET(linkage->flat32_mask, slot);
1459 else if (intr->def.bit_size == 16)
1460 BITSET_SET(linkage->flat16_mask, slot);
1461 else
1462 unreachable("invalid load_input type");
1463
1464 if (linkage->consumer_stage == MESA_SHADER_TESS_CTRL &&
1465 intr->intrinsic == nir_intrinsic_load_per_vertex_input) {
1466 nir_src *vertex_index_src = nir_get_io_arrayed_index_src(intr);
1467 nir_instr *vertex_index_instr = vertex_index_src->ssa->parent_instr;
1468
1469 if (!is_sysval(vertex_index_instr, SYSTEM_VALUE_INVOCATION_ID)) {
1470 if (intr->def.bit_size == 32)
1471 BITSET_SET(linkage->tcs_cross_invoc32_mask, slot);
1472 else if (intr->def.bit_size == 16)
1473 BITSET_SET(linkage->tcs_cross_invoc16_mask, slot);
1474 else
1475 unreachable("invalid load_input type");
1476 }
1477 }
1478 }
1479 return false;
1480 }
1481
1482 static bool
1483 gather_outputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_data)
1484 {
1485 struct linkage_info *linkage = (struct linkage_info *)cb_data;
1486
1487 if (intr->intrinsic != nir_intrinsic_store_output &&
1488 intr->intrinsic != nir_intrinsic_load_output &&
1489 intr->intrinsic != nir_intrinsic_store_per_vertex_output &&
1490 intr->intrinsic != nir_intrinsic_store_per_view_output &&
1491 intr->intrinsic != nir_intrinsic_store_per_primitive_output &&
1492 intr->intrinsic != nir_intrinsic_load_per_vertex_output &&
1493 intr->intrinsic != nir_intrinsic_load_per_view_output &&
1494 intr->intrinsic != nir_intrinsic_load_per_primitive_output)
1495 return false;
1496
1497 bool is_store =
1498 intr->intrinsic == nir_intrinsic_store_output ||
1499 intr->intrinsic == nir_intrinsic_store_per_vertex_output ||
1500 intr->intrinsic == nir_intrinsic_store_per_view_output ||
1501 intr->intrinsic == nir_intrinsic_store_per_primitive_output;
1502
1503 if (is_store) {
1504 /* nir_lower_io_to_scalar is required before this */
1505 assert(intr->src[0].ssa->num_components == 1);
1506 /* nir_opt_undef is required before this. */
1507 assert(intr->src[0].ssa->parent_instr->type !=
1508 nir_instr_type_undef);
1509 } else {
1510 /* nir_lower_io_to_scalar is required before this */
1511 assert(intr->def.num_components == 1);
1512 /* Output loads are only allowed in TCS. */
1513 assert(linkage->producer_stage == MESA_SHADER_TESS_CTRL);
1514 }
1515
1516 /* Non-zero constant offsets should have been folded by
1517 * nir_io_add_const_offset_to_base.
1518 */
1519 nir_src offset = *nir_get_io_offset_src(intr);
1520 assert(!nir_src_is_const(offset) || nir_src_as_uint(offset) == 0);
1521
1522 nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
1523
1524 if (!can_remove_varying(linkage, sem.location))
1525 return false;
1526
1527 /* For "xx -> FS", treat BFCn stores as COLn to make dead varying
1528 * elimination do the right thing automatically. The rules are:
1529 * - COLn inputs can be removed only if both COLn and BFCn are not
1530 * written.
1531 * - Both COLn and BFCn outputs can be removed if COLn inputs
1532 * aren't read.
1533 */
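/* Example: if the producer writes only BFC0 and the FS reads COL0, the
 * BFC0 store is recorded under COL0, so the COL0 input is not treated as
 * dead and the BFC0 output is kept.
 */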
1534 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
1535 if (sem.location == VARYING_SLOT_BFC0)
1536 sem.location = VARYING_SLOT_COL0;
1537 else if (sem.location == VARYING_SLOT_BFC1)
1538 sem.location = VARYING_SLOT_COL1;
1539 }
1540
1541 /* Insert the instruction into the list of stores or loads for this
1542 * scalar slot.
1543 */
1544 unsigned slot =
1545 get_scalar_16bit_slot(sem, nir_intrinsic_component(intr));
1546
1547 struct scalar_slot *out = &linkage->slot[slot];
1548 struct list_node *node = linear_alloc_child(linkage->linear_mem_ctx,
1549 sizeof(struct list_node));
1550 node->instr = intr;
1551 out->num_slots = MAX2(out->num_slots, sem.num_slots);
1552
1553 if (is_store) {
1554 list_addtail(&node->head, &out->producer.stores);
1555
1556 if (has_xfb(intr)) {
1557 BITSET_SET(linkage->xfb_mask, slot);
1558
1559 if (sem.no_varying &&
1560 !is_active_sysval_output(linkage, slot, intr)) {
1561 if (intr->src[0].ssa->bit_size == 32)
1562 BITSET_SET(linkage->xfb32_only_mask, slot);
1563 else if (intr->src[0].ssa->bit_size == 16)
1564 BITSET_SET(linkage->xfb16_only_mask, slot);
1565 else
1566 unreachable("invalid store_output type");
1567 }
1568 }
1569 } else {
1570 list_addtail(&node->head, &out->producer.loads);
1571 }
1572
1573 BITSET_SET(linkage->removable_mask, slot);
1574
1575 /* Indirect indexing. */
1576 if (!nir_src_is_const(offset)) {
1577 /* Only the indirectly-indexed component is marked as indirect. */
1578 for (unsigned i = 0; i < sem.num_slots; i++)
1579 BITSET_SET(linkage->indirect_mask, slot + i * 8);
1580
1581 /* Set the same vec4 type as the first element in all slots. */
1582 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
1583 enum fs_vec4_type fs_vec4_type =
1584 linkage->fs_vec4_type[sem.location];
1585
1586 for (unsigned i = 1; i < sem.num_slots; i++)
1587 linkage->fs_vec4_type[sem.location + i] = fs_vec4_type;
1588 }
1589 return false;
1590 }
1591
1592 if (can_optimize_varying(linkage, sem.location).disable_all)
1593 return false;
1594
1595 if (is_store) {
1596 nir_def *value = intr->src[0].ssa;
1597
1598 const bool constant = value->parent_instr->type == nir_instr_type_load_const;
1599
1600 /* If the store instruction is executed in a divergent block, the value
1601 * that's stored in the output becomes divergent.
1602 *
1603 * Mesh shaders get special treatment because we can't follow their topology,
1604 * so we only propagate constants.
1605 * TODO: revisit this when workgroup divergence analysis is merged.
1606 */
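/* For example, a store guarded by "if (gl_VertexID == 0)" executes in
 * a divergent block, so the output is treated as divergent even if the
 * stored value itself is convergent.
 */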
1607 const bool divergent = (!constant && linkage->producer_stage == MESA_SHADER_MESH) ||
1608 intr->instr.block->divergent ||
1609 nir_src_is_divergent(&intr->src[0]);
1610
1611 if (!out->producer.value) {
1612 /* This is the first store to this output. */
1613 BITSET_SET(linkage->output_equal_mask, slot);
1614 out->producer.value = value->parent_instr;
1615
1616 /* Set whether the value is convergent. Such varyings can be
1617 * promoted to flat regardless of their original interpolation
1618 * mode.
1619 */
1620 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT && !divergent) {
1621 if (value->bit_size == 32)
1622 BITSET_SET(linkage->convergent32_mask, slot);
1623 else if (value->bit_size == 16)
1624 BITSET_SET(linkage->convergent16_mask, slot);
1625 else
1626 unreachable("invalid store_output type");
1627 }
1628 } else {
1629 /* There are multiple stores to the same output. If they store
1630 * different values, clear the mask.
1631 */
1632 if (out->producer.value != value->parent_instr)
1633 BITSET_CLEAR(linkage->output_equal_mask, slot);
1634
1635 /* Update divergence information. */
1636 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT && divergent) {
1637 if (value->bit_size == 32)
1638 BITSET_CLEAR(linkage->convergent32_mask, slot);
1639 else if (value->bit_size == 16)
1640 BITSET_CLEAR(linkage->convergent16_mask, slot);
1641 else
1642 unreachable("invalid store_output type");
1643 }
1644 }
1645 } else {
1646 /* Only TCS output loads can get here.
1647 *
1648 * We need to record output loads as flat32 or flat16, otherwise
1649 * compaction will think that the slot is free and will put some
1650 * other output in its place.
1651 */
1652 assert(linkage->producer_stage == MESA_SHADER_TESS_CTRL);
1653
1654 if (!can_optimize_varying(linkage, sem.location).compact)
1655 return false;
1656
1657 if (intr->def.bit_size == 32)
1658 BITSET_SET(linkage->flat32_mask, slot);
1659 else if (intr->def.bit_size == 16)
1660 BITSET_SET(linkage->flat16_mask, slot);
1661 else
1662 unreachable("invalid load_output type");
1663 }
1664 return false;
1665 }
1666
1667 /******************************************************************
1668 * TIDYING UP INDIRECT VARYINGS (BEFORE DEAD VARYINGS REMOVAL)
1669 ******************************************************************/
1670
1671 static void
1672 tidy_up_indirect_varyings(struct linkage_info *linkage)
1673 {
1674 unsigned i;
1675
1676 /* Indirectly-indexed slots can also have direct accesses and thus set
1677 * various optimization bitmasks, so clear those bitmasks to make sure
1678 * such slots are neither optimized nor compacted.
1679 */
1680 BITSET_FOREACH_SET(i, linkage->indirect_mask, NUM_SCALAR_SLOTS) {
1681 slot_disable_optimizations_and_compaction(linkage, i);
1682 }
1683
1684 /* If some slots have both direct and indirect accesses, move instructions
1685 * of such slots to the slot representing the first array element, so that
1686 * we can remove all loads/stores of dead indirectly-indexed varyings
1687 * by only looking at the first element.
1688 */
1689 BITSET_FOREACH_SET(i, linkage->indirect_mask, NUM_SCALAR_SLOTS) {
1690 struct scalar_slot *first = &linkage->slot[i];
1691
1692 /* Skip if this is not the first array element. The first element
1693 * always sets num_slots to at least 2.
1694 */
1695 if (first->num_slots <= 1)
1696 continue;
1697
1698 /* Move instructions from other elements of the indirectly-accessed
1699 * array to the first element (by merging the linked lists).
1700 */
1701 for (unsigned elem = 1; elem < first->num_slots; elem++) {
1702 /* The component slots are at 16-bit granularity, so we need to
1703 * increment by 8 to get the same component in the next vec4 slot.
1704 */
1705 struct scalar_slot *other = &linkage->slot[i + elem * 8];
1706
1707 list_splicetail(&other->producer.stores, &first->producer.stores);
1708 list_splicetail(&other->producer.loads, &first->producer.loads);
1709 list_splicetail(&other->consumer.loads, &first->consumer.loads);
1710 list_inithead(&other->producer.stores);
1711 list_inithead(&other->producer.loads);
1712 list_inithead(&other->consumer.loads);
1713 }
1714 }
1715 }
1716
1717 /******************************************************************
1718 * TIDYING UP CONVERGENT VARYINGS
1719 ******************************************************************/
1720
1721 /**
1722 * Reorganize bitmasks for FS because they are initialized such that they can
1723 * intersect with the convergent bitmasks. We want the masks of interpolated,
1724 * flat, and convergent varyings to be mutually disjoint.
1725 */
1726 static void
1727 tidy_up_convergent_varyings(struct linkage_info *linkage)
1728 {
1729 if (linkage->consumer_stage != MESA_SHADER_FRAGMENT)
1730 return;
1731
1732 unsigned i;
1733 /* Whether to promote convergent interpolated slots to flat if it
1734 * doesn't lead to worse compaction.
1735 */
1736 bool optimize_convergent_slots = true; /* only turn off for debugging */
1737
1738 if (optimize_convergent_slots) {
1739 /* If a slot is flat and convergent and the driver can't load as flat
1740 * from interpolated vec4 slots, keep the flat bit and remove
1741 * the convergent bit. If the driver can load as flat from interpolated
1742 * vec4 slots, keep the convergent bit.
1743 *
1744 * If a slot is interpolated and convergent, remove the interpolated
1745 * bit and keep the convergent bit, which means that it's interpolated,
1746 * but can be promoted to flat.
1747 *
1748 * Since the geometry shader is the only shader that can store values
1749 * in multiple vertices before FS, it's required that all stores are
1750 * equal to be considered convergent (output_equal_mask), otherwise
1751 * the promotion to flat would be incorrect.
1752 */
1753 BITSET_FOREACH_SET(i, linkage->convergent32_mask, NUM_SCALAR_SLOTS) {
1754 if (!BITSET_TEST(linkage->interp_fp32_mask, i) &&
1755 !BITSET_TEST(linkage->color32_mask, i) &&
1756 !BITSET_TEST(linkage->flat32_mask, i) &&
1757 !BITSET6_TEST_ANY(linkage->interp_fp32_qual_masks, i) &&
1758 !BITSET3_TEST_ANY(linkage->color32_qual_masks, i)) {
1759 /* Clear the flag - not used by FS. */
1760 BITSET_CLEAR(linkage->convergent32_mask, i);
1761 } else if ((!linkage->can_mix_convergent_flat_with_interpolated &&
1762 BITSET_TEST(linkage->flat32_mask, i)) ||
1763 (linkage->producer_stage == MESA_SHADER_GEOMETRY &&
1764 !BITSET_TEST(linkage->output_equal_mask, i))) {
1765 /* Keep the original qualifier. */
1766 BITSET_CLEAR(linkage->convergent32_mask, i);
1767 } else {
1768 /* Keep it convergent. */
1769 BITSET_CLEAR(linkage->interp_fp32_mask, i);
1770 for (unsigned b = 0; b < NUM_INTERP_QUALIFIERS; b++)
1771 BITSET_CLEAR(linkage->interp_fp32_qual_masks[b], i);
1772 BITSET_CLEAR(linkage->color32_mask, i);
1773 for (unsigned b = 0; b < NUM_COLOR_QUALIFIERS; b++)
1774 BITSET_CLEAR(linkage->color32_qual_masks[b], i);
1775 BITSET_CLEAR(linkage->flat32_mask, i);
1776 }
1777 }
1778
1779 BITSET_FOREACH_SET(i, linkage->convergent16_mask, NUM_SCALAR_SLOTS) {
1780 if (!BITSET_TEST(linkage->interp_fp16_mask, i) &&
1781 !BITSET_TEST(linkage->flat16_mask, i) &&
1782 !BITSET6_TEST_ANY(linkage->interp_fp16_qual_masks, i)) {
1783 /* Clear the flag - not used by FS. */
1784 BITSET_CLEAR(linkage->convergent16_mask, i);
1785 } else if ((!linkage->can_mix_convergent_flat_with_interpolated &&
1786 BITSET_TEST(linkage->flat16_mask, i)) ||
1787 (linkage->producer_stage == MESA_SHADER_GEOMETRY &&
1788 !BITSET_TEST(linkage->output_equal_mask, i))) {
1789 /* Keep the original qualifier. */
1790 BITSET_CLEAR(linkage->convergent16_mask, i);
1791 } else {
1792 /* Keep it convergent. */
1793 BITSET_CLEAR(linkage->interp_fp16_mask, i);
1794 for (unsigned b = 0; b < NUM_INTERP_QUALIFIERS; b++)
1795 BITSET_CLEAR(linkage->interp_fp16_qual_masks[b], i);
1796 BITSET_CLEAR(linkage->flat16_mask, i);
1797 }
1798 }
1799 } else {
1800 /* Don't do anything with convergent slots. */
1801 BITSET_ZERO(linkage->convergent32_mask);
1802 BITSET_ZERO(linkage->convergent16_mask);
1803 }
1804 }
1805
1806 /******************************************************************
1807 * DETERMINING UNIFORM AND UBO MOVABILITY BASED ON DRIVER LIMITS
1808 ******************************************************************/
1809
1810 static bool
1811 is_variable_present(nir_shader *nir, nir_variable *var,
1812 nir_variable_mode mode, bool spirv)
1813 {
1814 nir_foreach_variable_with_modes(it, nir, mode) {
1815 if ((spirv && it->data.binding == var->data.binding) ||
1816 (!spirv && !strcmp(it->name, var->name)))
1817 return true;
1818 }
1819 return false;
1820 }
1821
1822 /* TODO: this should be a helper in common code */
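/* For example, a "vec3 u[4]" uniform below counts as 4 (array elements) *
 * 1 (column) * 4 = 16 scalar components, i.e. the estimate is in units of
 * vec4 slots times 4 rather than tightly packed scalars, which is fine
 * because it's only compared against a driver-declared limit.
 */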
1823 static unsigned
1824 get_uniform_components(const struct glsl_type *type)
1825 {
1826 unsigned size = glsl_get_aoa_size(type);
1827 size = MAX2(size, 1);
1828 size *= glsl_get_matrix_columns(glsl_without_array(type));
1829
1830 if (glsl_type_is_dual_slot(glsl_without_array(type)))
1831 size *= 2;
1832
1833 /* Convert from vec4 to scalar. */
1834 return size * 4;
1835 }
1836
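/* Count how many UBO binding points a UBO variable consumes: an interface
 * block array such as "Buf buf[3]" counts as 3, anything else as 1.
 */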
1837 static unsigned
1838 get_ubo_slots(const nir_variable *var)
1839 {
1840 if (glsl_type_is_interface(glsl_without_array(var->type))) {
1841 unsigned slots = glsl_get_aoa_size(var->type);
1842 return MAX2(slots, 1);
1843 }
1844
1845 return 1;
1846 }
1847
1848 /**
1849 * Count uniforms and see if the combined uniform component count is over
1850 * the limit. If it is, don't move any uniforms. It's sufficient if drivers
1851 * declare a very high limit.
1852 */
1853 static void
1854 determine_uniform_movability(struct linkage_info *linkage,
1855 unsigned max_uniform_components)
1856 {
1857 nir_shader *producer = linkage->producer_builder.shader;
1858 nir_shader *consumer = linkage->consumer_builder.shader;
1859 unsigned num_producer_uniforms = 0;
1860 unsigned num_consumer_uniforms = 0;
1861 unsigned num_shared_uniforms = 0;
1862
1863 nir_foreach_variable_with_modes(var, producer, nir_var_uniform) {
1864 if (is_variable_present(consumer, var, nir_var_uniform, linkage->spirv))
1865 num_shared_uniforms += get_uniform_components(var->type);
1866 else
1867 num_producer_uniforms += get_uniform_components(var->type);
1868 }
1869
1870 nir_foreach_variable_with_modes(var, consumer, nir_var_uniform) {
1871 if (!is_variable_present(producer, var, nir_var_uniform, linkage->spirv))
1872 num_consumer_uniforms += get_uniform_components(var->type);
1873 }
1874
1875 linkage->can_move_uniforms =
1876 num_producer_uniforms + num_consumer_uniforms + num_shared_uniforms <=
1877 max_uniform_components;
1878 }
1879
1880 /**
1881 * Count UBOs and see if the combined UBO count is over the limit. If it is,
1882 * don't move any UBOs. It's sufficient if drivers declare a very high limit.
1883 */
1884 static void
1885 determine_ubo_movability(struct linkage_info *linkage,
1886 unsigned max_ubos_per_stage)
1887 {
1888 nir_shader *producer = linkage->producer_builder.shader;
1889 nir_shader *consumer = linkage->consumer_builder.shader;
1890 unsigned num_producer_ubos = 0;
1891 unsigned num_consumer_ubos = 0;
1892 unsigned num_shared_ubos = 0;
1893
1894 nir_foreach_variable_with_modes(var, producer, nir_var_mem_ubo) {
1895 if (is_variable_present(consumer, var, nir_var_mem_ubo, linkage->spirv))
1896 num_shared_ubos += get_ubo_slots(var);
1897 else
1898 num_producer_ubos += get_ubo_slots(var);
1899 }
1900
1901 nir_foreach_variable_with_modes(var, consumer, nir_var_mem_ubo) {
1902 if (!is_variable_present(producer, var, nir_var_mem_ubo,
1903 linkage->spirv))
1904 num_consumer_ubos += get_ubo_slots(var);
1905 }
1906
1907 linkage->can_move_ubos =
1908 num_producer_ubos + num_consumer_ubos + num_shared_ubos <=
1909 max_ubos_per_stage;
1910 }
1911
1912 /******************************************************************
1913 * DEAD VARYINGS REMOVAL
1914 ******************************************************************/
1915
1916 static void
1917 remove_all_stores(struct linkage_info *linkage, unsigned i,
1918 bool *uses_xfb, nir_opt_varyings_progress *progress)
1919 {
1920 struct scalar_slot *slot = &linkage->slot[i];
1921
1922 assert(!list_is_empty(&slot->producer.stores) &&
1923 list_is_empty(&slot->producer.loads) &&
1924 list_is_empty(&slot->consumer.loads));
1925
1926 /* Remove all stores. */
1927 list_for_each_entry_safe(struct list_node, iter, &slot->producer.stores, head) {
1928 if (nir_remove_varying(iter->instr, linkage->consumer_stage)) {
1929 list_del(&iter->head);
1930 *progress |= nir_progress_producer;
1931 } else {
1932 if (has_xfb(iter->instr)) {
1933 *uses_xfb = true;
1934
1935 if (!is_active_sysval_output(linkage, i, iter->instr)) {
1936 if (iter->instr->src[0].ssa->bit_size == 32)
1937 BITSET_SET(linkage->xfb32_only_mask, i);
1938 else if (iter->instr->src[0].ssa->bit_size == 16)
1939 BITSET_SET(linkage->xfb16_only_mask, i);
1940 else
1941 unreachable("invalid store_output type");
1942 }
1943 }
1944 }
1945 }
1946 }
1947
1948 static void
1949 remove_dead_varyings(struct linkage_info *linkage,
1950 nir_opt_varyings_progress *progress)
1951 {
1952 unsigned i;
1953
1954 /* Remove dead inputs and outputs. */
1955 BITSET_FOREACH_SET(i, linkage->removable_mask, NUM_SCALAR_SLOTS) {
1956 struct scalar_slot *slot = &linkage->slot[i];
1957
1958 /* Only indirect access can have no loads and stores because we moved
1959 * them to the first element in tidy_up_indirect_varyings().
1960 */
1961 assert(!list_is_empty(&slot->producer.stores) ||
1962 !list_is_empty(&slot->producer.loads) ||
1963 !list_is_empty(&slot->consumer.loads) ||
1964 BITSET_TEST(linkage->indirect_mask, i));
1965
1966 /* Nothing to do if there are no loads and stores. */
1967 if (list_is_empty(&slot->producer.stores) &&
1968 list_is_empty(&slot->producer.loads) &&
1969 list_is_empty(&slot->consumer.loads))
1970 continue;
1971
1972 /* If there are producer loads (e.g. TCS) but no consumer loads
1973 * (e.g. TES), set the "no_varying" flag to indicate that the outputs
1974 * are not consumed by the next shader stage (e.g. TES).
1975 */
1976 if (!list_is_empty(&slot->producer.stores) &&
1977 !list_is_empty(&slot->producer.loads) &&
1978 list_is_empty(&slot->consumer.loads)) {
1979 for (unsigned list_index = 0; list_index < 2; list_index++) {
1980 struct list_head *list = list_index ? &slot->producer.stores :
1981 &slot->producer.loads;
1982
1983 list_for_each_entry(struct list_node, iter, list, head) {
1984 nir_io_semantics sem = nir_intrinsic_io_semantics(iter->instr);
1985 sem.no_varying = 1;
1986 nir_intrinsic_set_io_semantics(iter->instr, sem);
1987 }
1988 }
1989
1990 /* This tells the compaction to move these varyings to the end. */
1991 if (BITSET_TEST(linkage->flat32_mask, i)) {
1992 assert(linkage->consumer_stage != MESA_SHADER_FRAGMENT);
1993 BITSET_CLEAR(linkage->flat32_mask, i);
1994 BITSET_SET(linkage->no_varying32_mask, i);
1995 }
1996 if (BITSET_TEST(linkage->flat16_mask, i)) {
1997 assert(linkage->consumer_stage != MESA_SHADER_FRAGMENT);
1998 BITSET_CLEAR(linkage->flat16_mask, i);
1999 BITSET_SET(linkage->no_varying16_mask, i);
2000 }
2001 continue;
2002 }
2003
2004 /* The varyings aren't dead if both loads and stores are present. */
2005 if (!list_is_empty(&slot->producer.stores) &&
2006 (!list_is_empty(&slot->producer.loads) ||
2007 !list_is_empty(&slot->consumer.loads)))
2008 continue;
2009
2010 bool uses_xfb = false;
2011
2012 if (list_is_empty(&slot->producer.stores)) {
2013 /* There are no stores. */
2014 assert(!list_is_empty(&slot->producer.loads) ||
2015 !list_is_empty(&slot->consumer.loads));
2016
2017 /* TEXn.xy loads can't be removed in FS because of the coord
2018 * replace state, but TEXn outputs can be removed if they are
2019 * not read by FS.
2020 *
2021 * TEXn.zw loads can be eliminated and replaced by (0, 1), which
2022 * is equal to the coord replace value.
2023 */
2024 if (is_interpolated_texcoord(linkage, i)) {
2025 assert(i % 2 == 0); /* high 16-bit slots disallowed */
2026 /* Keep TEXn.xy. */
2027 if (i % 8 < 4)
2028 continue;
2029 }
2030
2031 /* Replace all loads with undef. Do that for both input loads
2032 * in the consumer stage and output loads in the producer stage
2033 * because we also want to eliminate TCS loads that have no
2034 * corresponding TCS stores.
2035 */
2036 for (unsigned list_index = 0; list_index < 2; list_index++) {
2037 struct list_head *list = list_index ? &slot->producer.loads :
2038 &slot->consumer.loads;
2039 nir_builder *b = list_index ? &linkage->producer_builder :
2040 &linkage->consumer_builder;
2041
2042 list_for_each_entry(struct list_node, iter, list, head) {
2043 nir_intrinsic_instr *loadi = iter->instr;
2044 nir_def *replacement = NULL;
2045
2046 b->cursor = nir_before_instr(&loadi->instr);
2047
2048 /* LAYER and VIEWPORT FS inputs should be replaced by 0
2049 * instead of undef.
2050 */
2051 gl_varying_slot location = (gl_varying_slot)(vec4_slot(i));
2052
2053 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT &&
2054 (location == VARYING_SLOT_LAYER ||
2055 location == VARYING_SLOT_VIEWPORT ||
2056 /* TEXn.z is replaced by 0 (matching coord replace) */
2057 (is_interpolated_texcoord(linkage, i) && i % 8 == 4)))
2058 replacement = nir_imm_intN_t(b, 0, loadi->def.bit_size);
2059 else if (linkage->consumer_stage == MESA_SHADER_FRAGMENT &&
2060 /* TEXn.w is replaced by 1 (matching coord replace) */
2061 is_interpolated_texcoord(linkage, i) && i % 8 == 6)
2062 replacement = nir_imm_floatN_t(b, 1, loadi->def.bit_size);
2063 else
2064 replacement = nir_undef(b, 1, loadi->def.bit_size);
2065
2066 nir_def_replace(&loadi->def, replacement);
2067
2068 *progress |= list_index ? nir_progress_producer :
2069 nir_progress_consumer;
2070 }
2071 }
2072
2073 /* Clear the lists. */
2074 list_inithead(&slot->producer.loads);
2075 list_inithead(&slot->consumer.loads);
2076 } else {
2077 /* There are no loads. */
2078 remove_all_stores(linkage, i, &uses_xfb, progress);
2079 }
2080
2081 /* Clear bitmasks associated with this varying slot or array. */
2082 for (unsigned elem = 0; elem < slot->num_slots; elem++)
2083 clear_slot_info_after_removal(linkage, i + elem, uses_xfb);
2084 }
2085 }
2086
2087 /******************************************************************
2088 * SSA CLONING HELPERS
2089 ******************************************************************/
2090
2091 /* Pass flags for inter-shader code motion. Also used by helpers. */
2092 #define FLAG_ALU_IS_TES_INTERP_LOAD BITFIELD_BIT(0)
2093 #define FLAG_MOVABLE BITFIELD_BIT(1)
2094 #define FLAG_UNMOVABLE BITFIELD_BIT(2)
2095 #define FLAG_POST_DOMINATOR_PROCESSED BITFIELD_BIT(3)
2096 #define FLAG_GATHER_LOADS_VISITED BITFIELD_BIT(4)
2097
2098 #define FLAG_INTERP_MASK BITFIELD_RANGE(5, 3)
2099 #define FLAG_INTERP_CONVERGENT (0 << 5)
2100 #define FLAG_INTERP_FLAT (1 << 5)
2101 /* FS-only interpolation modes. */
2102 #define FLAG_INTERP_PERSP_PIXEL (2 << 5)
2103 #define FLAG_INTERP_PERSP_CENTROID (3 << 5)
2104 #define FLAG_INTERP_PERSP_SAMPLE (4 << 5)
2105 #define FLAG_INTERP_LINEAR_PIXEL (5 << 5)
2106 #define FLAG_INTERP_LINEAR_CENTROID (6 << 5)
2107 #define FLAG_INTERP_LINEAR_SAMPLE (7 << 5)
2108 /* TES-only interpolation modes. (these were found in shaders) */
2109 #define FLAG_INTERP_TES_TRIANGLE_UVW (2 << 5) /* v0*u + v1*v + v2*w */
2110 #define FLAG_INTERP_TES_TRIANGLE_WUV (3 << 5) /* v0*w + v1*u + v2*v */
2111 /* TODO: Feel free to insert more TES interpolation equations here. */
2112
2113 static bool
2114 can_move_deref_between_shaders(struct linkage_info *linkage, nir_instr *instr)
2115 {
2116 nir_deref_instr *deref = nir_instr_as_deref(instr);
2117 unsigned allowed_modes =
2118 (linkage->can_move_uniforms ? nir_var_uniform : 0) |
2119 (linkage->can_move_ubos ? nir_var_mem_ubo : 0);
2120
2121 if (!nir_deref_mode_is_one_of(deref, allowed_modes))
2122 return false;
2123
2124 switch (deref->deref_type) {
2125 case nir_deref_type_var:
2126 case nir_deref_type_struct:
2127 case nir_deref_type_array:
2128 break;
2129 default:
2130 return false;
2131 }
2132
2133 nir_variable *var = nir_deref_instr_get_variable(deref);
2134
2135 /* Subroutine uniforms are not moved. Even though moving them works
2136 * correctly (subroutine calls have been inlined at this point),
2137 * subroutine functions aren't moved, and the linker doesn't like
2138 * a shader that contains a subroutine uniform but no subroutine
2139 * functions. This could be fixed in the linker, but for now, don't
2140 * move subroutine uniforms.
2141 */
2142 if (var->name && strstr(var->name, "__subu_") == var->name)
2143 return false;
2144
2145 return true;
2146 }
2147
2148 static nir_intrinsic_instr *
2149 find_per_vertex_load_for_tes_interp(nir_instr *instr)
2150 {
2151 switch (instr->type) {
2152 case nir_instr_type_alu: {
2153 nir_alu_instr *alu = nir_instr_as_alu(instr);
2154 unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
2155
2156 for (unsigned i = 0; i < num_srcs; i++) {
2157 nir_instr *src = alu->src[i].src.ssa->parent_instr;
2158 nir_intrinsic_instr *intr = find_per_vertex_load_for_tes_interp(src);
2159
2160 if (intr)
2161 return intr;
2162 }
2163 return NULL;
2164 }
2165
2166 case nir_instr_type_intrinsic: {
2167 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
2168
2169 return intr->intrinsic == nir_intrinsic_load_per_vertex_input ?
2170 intr : NULL;
2171 }
2172
2173 default:
2174 unreachable("unexpected instruction type");
2175 }
2176 }
2177
2178 static nir_def *
2179 get_stored_value_for_load(struct linkage_info *linkage, nir_instr *instr)
2180 {
2181 nir_intrinsic_instr *intr;
2182
2183 if (instr->type == nir_instr_type_intrinsic) {
2184 intr = nir_instr_as_intrinsic(instr);
2185 } else {
2186 assert(instr->type == nir_instr_type_alu &&
2187 instr->pass_flags & FLAG_ALU_IS_TES_INTERP_LOAD);
2188 intr = find_per_vertex_load_for_tes_interp(instr);
2189 }
2190
2191 unsigned slot_index = intr_get_scalar_16bit_slot(intr);
2192 assert(list_is_singular(&linkage->slot[slot_index].producer.stores));
2193
2194 nir_def *stored_value =
2195 list_first_entry(&linkage->slot[slot_index].producer.stores,
2196 struct list_node, head)->instr->src[0].ssa;
2197 assert(stored_value->num_components == 1);
2198 return stored_value;
2199 }
2200
2201 /* Clone the SSA, which can be in a different shader. */
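/* For example, when a uniform expression like fmul(load_deref(u), imm) is
 * propagated into the consumer, the deref chain and the ALU are recreated
 * there via this helper; clones_ht caches already-cloned instructions so
 * shared subexpressions are only cloned once.
 */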
2202 static nir_def *
2203 clone_ssa_impl(struct linkage_info *linkage, nir_builder *b, nir_def *ssa)
2204 {
2205 struct hash_entry *entry = _mesa_hash_table_search(linkage->clones_ht,
2206 ssa->parent_instr);
2207 if (entry)
2208 return entry->data;
2209
2210 nir_def *clone = NULL;
2211
2212 switch (ssa->parent_instr->type) {
2213 case nir_instr_type_load_const:
2214 clone = nir_build_imm(b, ssa->num_components, ssa->bit_size,
2215 nir_instr_as_load_const(ssa->parent_instr)->value);
2216 break;
2217
2218 case nir_instr_type_undef:
2219 clone = nir_undef(b, ssa->num_components, ssa->bit_size);
2220 break;
2221
2222 case nir_instr_type_alu: {
2223 nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
2224
2225 if (alu->instr.pass_flags & FLAG_ALU_IS_TES_INTERP_LOAD) {
2226 /* We are cloning an interpolated TES load in the producer for
2227 * backward inter-shader code motion.
2228 */
2229 assert(&linkage->producer_builder == b);
2230 return get_stored_value_for_load(linkage, &alu->instr);
2231 }
2232
2233 nir_def *src[4] = {0};
2234 unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
2235 assert(num_srcs <= ARRAY_SIZE(src));
2236
2237 for (unsigned i = 0; i < num_srcs; i++)
2238 src[i] = clone_ssa_impl(linkage, b, alu->src[i].src.ssa);
2239
2240 clone = nir_build_alu(b, alu->op, src[0], src[1], src[2], src[3]);
2241 nir_alu_instr *alu_clone = nir_instr_as_alu(clone->parent_instr);
2242
2243 alu_clone->exact = alu->exact;
2244 alu_clone->no_signed_wrap = alu->no_signed_wrap;
2245 alu_clone->no_unsigned_wrap = alu->no_unsigned_wrap;
2246 alu_clone->def.num_components = alu->def.num_components;
2247 alu_clone->def.bit_size = alu->def.bit_size;
2248
2249 for (unsigned i = 0; i < num_srcs; i++) {
2250 memcpy(alu_clone->src[i].swizzle, alu->src[i].swizzle,
2251 NIR_MAX_VEC_COMPONENTS);
2252 }
2253 break;
2254 }
2255
2256 case nir_instr_type_intrinsic: {
2257 /* Clone load_deref of uniform or ubo. It's the only thing that can
2258 * occur here.
2259 */
2260 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(ssa->parent_instr);
2261
2262 switch (intr->intrinsic) {
2263 case nir_intrinsic_load_deref: {
2264 nir_def *ssa = clone_ssa_impl(linkage, b, intr->src[0].ssa);
2265 clone = nir_load_deref(b, nir_instr_as_deref(ssa->parent_instr));
2266 break;
2267 }
2268
2269 case nir_intrinsic_load_input:
2270 case nir_intrinsic_load_per_primitive_input:
2271 case nir_intrinsic_load_interpolated_input: {
2272 /* We are cloning load_input in the producer for backward
2273 * inter-shader code motion. Replace the input load with the stored
2274 * output value. That way we can clone any expression using inputs
2275 * from the consumer in the producer.
2276 */
2277 assert(&linkage->producer_builder == b);
2278 clone = get_stored_value_for_load(linkage, &intr->instr);
2279 break;
2280 }
2281
2282 default:
2283 unreachable("unexpected intrinsic");
2284 }
2285 break;
2286 }
2287
2288 case nir_instr_type_deref: {
2289 nir_deref_instr *deref = nir_instr_as_deref(ssa->parent_instr);
2290 assert(nir_deref_mode_is_one_of(deref, nir_var_uniform | nir_var_mem_ubo));
2291
2292 /* Get the uniform from the original shader. */
2293 nir_variable *var = nir_deref_instr_get_variable(deref);
2294 assert(!(var->data.mode & nir_var_mem_ubo) || linkage->can_move_ubos);
2295
2296 /* Declare the uniform in the target shader. If it's the same shader
2297 * (in the case of replacing output loads with a uniform), this has
2298 * no effect. If the variable already exists in the target shader, this
2299 * just returns the existing one.
2300 */
2301 var = nir_clone_uniform_variable(b->shader, var, linkage->spirv);
2302
2303 if (deref->deref_type == nir_deref_type_var) {
2304 clone = &nir_build_deref_var(b, var)->def;
2305 } else {
2306 nir_deref_instr *parent_orig = nir_deref_instr_parent(deref);
2307 nir_deref_instr *parent_clone =
2308 nir_instr_as_deref(clone_ssa_impl(linkage, b, &parent_orig->def)
2309 ->parent_instr);
2310
2311 switch (deref->deref_type) {
2312 case nir_deref_type_array: {
2313 nir_def *index = clone_ssa_impl(linkage, b, deref->arr.index.ssa);
2314 clone = &nir_build_deref_array(b, parent_clone, index)->def;
2315 break;
2316 }
2317 case nir_deref_type_struct:
2318 clone = &nir_build_deref_struct(b, parent_clone,
2319 deref->strct.index)->def;
2320 break;
2321 default:
2322 unreachable("invalid deref type");
2323 }
2324 }
2325 break;
2326 }
2327
2328 default:
2329 unreachable("unexpected instruction type");
2330 }
2331
2332 _mesa_hash_table_insert(linkage->clones_ht, ssa->parent_instr, clone);
2333 return clone;
2334 }
2335
2336 static nir_def *
2337 clone_ssa(struct linkage_info *linkage, nir_builder *b, nir_def *ssa)
2338 {
2339 assert(!linkage->clones_ht);
2340 linkage->clones_ht = _mesa_pointer_hash_table_create(NULL);
2341
2342 nir_def *clone = clone_ssa_impl(linkage, b, ssa);
2343
2344 _mesa_hash_table_destroy(linkage->clones_ht, NULL);
2345 linkage->clones_ht = NULL;
2346 return clone;
2347 }
2348
2349 /******************************************************************
2350 * UNIFORM EXPRESSION PROPAGATION (CONSTANTS, UNIFORMS, UBO LOADS)
2351 ******************************************************************/
2352
2353 static void
2354 remove_all_stores_and_clear_slot(struct linkage_info *linkage, unsigned slot,
2355 nir_opt_varyings_progress *progress)
2356 {
2357 bool uses_xfb = false;
2358 remove_all_stores(linkage, slot, &uses_xfb, progress);
2359 clear_slot_info_after_removal(linkage, slot, uses_xfb);
2360 }
2361
2362 struct is_uniform_expr_state {
2363 struct linkage_info *linkage;
2364 unsigned cost;
2365 };
2366
2367 static bool
2368 is_uniform_expression(nir_instr *instr, struct is_uniform_expr_state *state);
2369
2370 static bool
2371 src_is_uniform_expression(nir_src *src, void *data)
2372 {
2373 return is_uniform_expression(src->ssa->parent_instr,
2374 (struct is_uniform_expr_state*)data);
2375 }
2376
2377 /**
2378 * Return whether instr is a uniform expression that can be moved into
2379 * the next shader.
2380 */
2381 static bool
2382 is_uniform_expression(nir_instr *instr, struct is_uniform_expr_state *state)
2383 {
2384 switch (instr->type) {
2385 case nir_instr_type_load_const:
2386 case nir_instr_type_undef:
2387 return true;
2388
2389 case nir_instr_type_alu:
2390 break;
2391
2392 case nir_instr_type_intrinsic:
2393 if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_deref)
2394 break;
2395 return false;
2396
2397 case nir_instr_type_deref:
2398 if (!can_move_deref_between_shaders(state->linkage, instr))
2399 return false;
2400 /* We need to iterate over the deref chain recursively. */
2401 break;
2402
2403 default:
2404 return false;
2405 }
2406
2407 if (!instr->pass_flags) {
2408 state->cost += state->linkage->varying_estimate_instr_cost ?
2409 state->linkage->varying_estimate_instr_cost(instr) : 1;
2410 instr->pass_flags = 1;
2411 return nir_foreach_src(instr, src_is_uniform_expression, state);
2412 }
2413 return true;
2414 }
2415
2416 /**
2417 * Propagate constants, uniforms, UBO loads, and uniform expressions
2418 * in output components to input loads in the next shader and output
2419 * loads in the current stage, and remove the output components.
2420 *
2421 * Uniform expressions are ALU expressions only sourcing constants, uniforms,
2422 * and UBO loads.
2423 */
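/* A hypothetical GLSL-level example: if the producer writes
 * "out_v = u_scale * 0.5;", the consumer's loads of out_v are replaced by
 * "u_scale * 0.5" recomputed in the consumer (with u_scale re-declared
 * there if needed), and the producer's store of out_v is then removed.
 */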
2424 static void
2425 propagate_uniform_expressions(struct linkage_info *linkage,
2426 nir_opt_varyings_progress *progress)
2427 {
2428 unsigned i;
2429
2430 /* Find uniform expressions. If there are multiple stores, they should all
2431 * store the same value. That's guaranteed by output_equal_mask.
2432 */
2433 BITSET_FOREACH_SET(i, linkage->output_equal_mask, NUM_SCALAR_SLOTS) {
2434 if (!can_optimize_varying(linkage, vec4_slot(i)).propagate_uniform_expr)
2435 continue;
2436
2437 struct scalar_slot *slot = &linkage->slot[i];
2438 assert(!list_is_empty(&slot->producer.loads) ||
2439 !list_is_empty(&slot->consumer.loads));
2440
2441 struct is_uniform_expr_state state = {
2442 .linkage = linkage,
2443 .cost = 0,
2444 };
2445
2446 /* Clear pass_flags, which is used to prevent adding the cost of
2447 * the same instruction multiple times.
2448 */
2449 nir_shader_clear_pass_flags(linkage->producer_builder.shader);
2450
2451 if (!is_uniform_expression(slot->producer.value, &state))
2452 continue;
2453
2454 if (state.cost > linkage->max_varying_expression_cost)
2455 continue;
2456
2457 /* Colors can be propagated only if they are constant between [0, 1]
2458 * because that's the only case when the clamp vertex color state has
2459 * no effect.
2460 */
2461 if (is_interpolated_color(linkage, i) &&
2462 (slot->producer.value->type != nir_instr_type_load_const ||
2463 nir_instr_as_load_const(slot->producer.value)->value[0].f32 < 0 ||
2464 nir_instr_as_load_const(slot->producer.value)->value[0].f32 > 1))
2465 continue;
2466
2467 /* TEXn.zw can be propagated only if it's equal to (0, 1) because it's
2468 * the coord replace value.
2469 */
2470 if (is_interpolated_texcoord(linkage, i)) {
2471 assert(i % 2 == 0); /* high 16-bit slots disallowed */
2472
2473 if (i % 8 == 0 || /* TEXn.x */
2474 i % 8 == 2 || /* TEXn.y */
2475 slot->producer.value->type != nir_instr_type_load_const)
2476 continue;
2477
2478 float value =
2479 nir_instr_as_load_const(slot->producer.value)->value[0].f32;
2480
2481 /* This ignores signed zeros, but those are destroyed by
2482 * interpolation, so it doesn't matter.
2483 */
2484 if ((i % 8 == 4 && value != 0) ||
2485 (i % 8 == 6 && value != 1))
2486 continue;
2487 }
2488
2489 /* Clear pass_flags, which is used by clone_ssa. */
2490 nir_shader_clear_pass_flags(linkage->producer_builder.shader);
2491
2492 /* Replace all loads. Do that for both input and output loads. */
2493 for (unsigned list_index = 0; list_index < 2; list_index++) {
2494 struct list_head *load = list_index ? &slot->producer.loads :
2495 &slot->consumer.loads;
2496 nir_builder *b = list_index ? &linkage->producer_builder :
2497 &linkage->consumer_builder;
2498
2499 list_for_each_entry(struct list_node, node, load, head) {
2500 nir_intrinsic_instr *loadi = node->instr;
2501 b->cursor = nir_before_instr(&loadi->instr);
2502
2503 /* Copy the uniform expression before the load. */
2504 nir_def *clone = clone_ssa(linkage, b,
2505 nir_instr_def(slot->producer.value));
2506
2507 /* Interpolation converts Infs to NaNs. If we skip it, we need to
2508 * convert Infs to NaNs manually.
2509 */
2510 if (loadi->intrinsic == nir_intrinsic_load_interpolated_input &&
2511 preserve_nans(b->shader, clone->bit_size))
2512 clone = build_convert_inf_to_nan(b, clone);
2513
2514 /* Replace the original load. */
2515 nir_def_replace(&loadi->def, clone);
2516 *progress |= list_index ? nir_progress_producer :
2517 nir_progress_consumer;
2518 }
2519 }
2520
2521 /* Clear the lists. */
2522 list_inithead(&slot->producer.loads);
2523 list_inithead(&slot->consumer.loads);
2524
2525 /* Remove all stores now that loads have been replaced. */
2526 remove_all_stores_and_clear_slot(linkage, i, progress);
2527 }
2528 }
2529
2530 /******************************************************************
2531 * OUTPUT DEDUPLICATION
2532 ******************************************************************/
2533
2534 /* We can only deduplicate outputs that have the same qualifier, and color
2535 * components must be deduplicated separately because they are affected by GL
2536 * states.
2537 *
2538 * QUAL_*_INTERP_ANY means that the interpolation qualifier doesn't matter for
2539 * deduplication as long as it's not flat.
2540 *
2541 * QUAL_COLOR_SHADEMODEL_ANY is the same, but can be switched to flat
2542 * by the flatshade state, so it can't be deduplicated with
2543 * QUAL_COLOR_INTERP_ANY, which is never flat.
2544 */
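/* For example, two outputs that store the same SSA value and are both
 * read with flat loads in the FS can be deduplicated into one slot, while
 * a flat-loaded output and a smooth-interpolated output never can, even
 * if they store identical values.
 */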
2545 enum var_qualifier {
2546 QUAL_PATCH,
2547 QUAL_VAR_FLAT,
2548 QUAL_COLOR_FLAT,
2549 QUAL_EXPLICIT,
2550 QUAL_EXPLICIT_STRICT,
2551 QUAL_PER_PRIMITIVE,
2552 /* When nir_io_has_flexible_input_interpolation_except_flat is set: */
2553 QUAL_VAR_INTERP_ANY,
2554 QUAL_COLOR_INTERP_ANY,
2555 QUAL_COLOR_SHADEMODEL_ANY,
2556 /* When nir_io_has_flexible_input_interpolation_except_flat is not set: */
2557 QUAL_VAR_PERSP_PIXEL,
2558 QUAL_VAR_PERSP_CENTROID,
2559 QUAL_VAR_PERSP_SAMPLE,
2560 QUAL_VAR_LINEAR_PIXEL,
2561 QUAL_VAR_LINEAR_CENTROID,
2562 QUAL_VAR_LINEAR_SAMPLE,
2563 QUAL_COLOR_PERSP_PIXEL,
2564 QUAL_COLOR_PERSP_CENTROID,
2565 QUAL_COLOR_PERSP_SAMPLE,
2566 QUAL_COLOR_LINEAR_PIXEL,
2567 QUAL_COLOR_LINEAR_CENTROID,
2568 QUAL_COLOR_LINEAR_SAMPLE,
2569 QUAL_COLOR_SHADEMODEL_PIXEL,
2570 QUAL_COLOR_SHADEMODEL_CENTROID,
2571 QUAL_COLOR_SHADEMODEL_SAMPLE,
2572 NUM_DEDUP_QUALIFIERS,
2573
2574 QUAL_SKIP,
2575 QUAL_UNKNOWN,
2576 };
2577
2578 /* Return the input qualifier if all loads use the same one, else skip.
2579 * This is only used by output deduplication to determine input compatibility.
2580 */
2581 static enum var_qualifier
2582 get_input_qualifier(struct linkage_info *linkage, unsigned i)
2583 {
2584 assert(linkage->consumer_stage == MESA_SHADER_FRAGMENT);
2585 struct scalar_slot *slot = &linkage->slot[i];
2586 bool is_color = is_interpolated_color(linkage, i);
2587 nir_intrinsic_instr *load =
2588 list_first_entry(&slot->consumer.loads, struct list_node, head)->instr;
2589
2590 if (load->intrinsic == nir_intrinsic_load_input)
2591 return is_color ? QUAL_COLOR_FLAT : QUAL_VAR_FLAT;
2592
2593 if (load->intrinsic == nir_intrinsic_load_per_primitive_input)
2594 return QUAL_PER_PRIMITIVE;
2595
2596 if (load->intrinsic == nir_intrinsic_load_input_vertex) {
2597 return nir_intrinsic_io_semantics(load).interp_explicit_strict ?
2598 QUAL_EXPLICIT_STRICT : QUAL_EXPLICIT;
2599 }
2600
2601 assert(load->intrinsic == nir_intrinsic_load_interpolated_input);
2602 nir_intrinsic_instr *baryc =
2603 nir_instr_as_intrinsic(load->src[0].ssa->parent_instr);
2604
2605 if (linkage->has_flexible_interp) {
2606 if (is_color) {
2607 return nir_intrinsic_interp_mode(baryc) == INTERP_MODE_NONE ?
2608 QUAL_COLOR_SHADEMODEL_ANY : QUAL_COLOR_INTERP_ANY;
2609 } else {
2610 return QUAL_VAR_INTERP_ANY;
2611 }
2612 }
2613
2614 /* If interpolateAt{Centroid,Offset,Sample} is used, see if there is
2615 * another load that doesn't use those, so that we get the real qualifier.
2616 */
2617 if (baryc->intrinsic == nir_intrinsic_load_barycentric_centroid ||
2618 baryc->intrinsic == nir_intrinsic_load_barycentric_at_offset ||
2619 baryc->intrinsic == nir_intrinsic_load_barycentric_at_sample) {
2620 list_for_each_entry(struct list_node, iter, &slot->consumer.loads, head) {
2621 nir_intrinsic_instr *bar =
2622 nir_instr_as_intrinsic(iter->instr->src[0].ssa->parent_instr);
2623
2624 if (bar->intrinsic != nir_intrinsic_load_barycentric_centroid &&
2625 bar->intrinsic != nir_intrinsic_load_barycentric_at_offset &&
2626 bar->intrinsic != nir_intrinsic_load_barycentric_at_sample) {
2627 baryc = bar;
2628 break;
2629 }
2630 }
2631 }
2632
2633 /* Get the exact interpolation qualifier. */
2634 unsigned pixel_location;
2635 enum var_qualifier qual;
2636
2637 switch (baryc->intrinsic) {
2638 case nir_intrinsic_load_barycentric_pixel:
2639 pixel_location = 0;
2640 break;
2641 case nir_intrinsic_load_barycentric_centroid:
2642 pixel_location = 1;
2643 break;
2644 case nir_intrinsic_load_barycentric_sample:
2645 pixel_location = 2;
2646 break;
2647 case nir_intrinsic_load_barycentric_at_offset:
2648 case nir_intrinsic_load_barycentric_at_sample:
2649 /* Don't deduplicate outputs that are interpolated at offset/sample. */
2650 return QUAL_SKIP;
2651 default:
2652 unreachable("unexpected barycentric src");
2653 }
2654
2655 switch (nir_intrinsic_interp_mode(baryc)) {
2656 case INTERP_MODE_NONE:
2657 qual = is_color ? QUAL_COLOR_SHADEMODEL_PIXEL :
2658 QUAL_VAR_PERSP_PIXEL;
2659 break;
2660 case INTERP_MODE_SMOOTH:
2661 qual = is_color ? QUAL_COLOR_PERSP_PIXEL : QUAL_VAR_PERSP_PIXEL;
2662 break;
2663 case INTERP_MODE_NOPERSPECTIVE:
2664 qual = is_color ? QUAL_COLOR_LINEAR_PIXEL : QUAL_VAR_LINEAR_PIXEL;
2665 break;
2666 default:
2667 unreachable("unexpected interp mode");
2668 }
2669
2670 /* The ordering of the "qual" enum was carefully chosen to make this
2671 * addition correct.
2672 */
2673 STATIC_ASSERT(QUAL_VAR_PERSP_PIXEL + 1 == QUAL_VAR_PERSP_CENTROID);
2674 STATIC_ASSERT(QUAL_VAR_PERSP_PIXEL + 2 == QUAL_VAR_PERSP_SAMPLE);
2675 STATIC_ASSERT(QUAL_VAR_LINEAR_PIXEL + 1 == QUAL_VAR_LINEAR_CENTROID);
2676 STATIC_ASSERT(QUAL_VAR_LINEAR_PIXEL + 2 == QUAL_VAR_LINEAR_SAMPLE);
2677 STATIC_ASSERT(QUAL_COLOR_PERSP_PIXEL + 1 == QUAL_COLOR_PERSP_CENTROID);
2678 STATIC_ASSERT(QUAL_COLOR_PERSP_PIXEL + 2 == QUAL_COLOR_PERSP_SAMPLE);
2679 STATIC_ASSERT(QUAL_COLOR_LINEAR_PIXEL + 1 == QUAL_COLOR_LINEAR_CENTROID);
2680 STATIC_ASSERT(QUAL_COLOR_LINEAR_PIXEL + 2 == QUAL_COLOR_LINEAR_SAMPLE);
2681 STATIC_ASSERT(QUAL_COLOR_SHADEMODEL_PIXEL + 1 ==
2682 QUAL_COLOR_SHADEMODEL_CENTROID);
2683 STATIC_ASSERT(QUAL_COLOR_SHADEMODEL_PIXEL + 2 ==
2684 QUAL_COLOR_SHADEMODEL_SAMPLE);
2685 return qual + pixel_location;
2686 }
2687
2688 static void
2689 deduplicate_outputs(struct linkage_info *linkage,
2690 nir_opt_varyings_progress *progress)
2691 {
2692 struct hash_table *tables[NUM_DEDUP_QUALIFIERS] = {NULL};
2693 unsigned i;
2694
2695 /* Find duplicated outputs. If there are multiple stores, they should all
2696 * store the same value as all stores of some other output. That's
2697 * guaranteed by output_equal_mask.
2698 */
2699 BITSET_FOREACH_SET(i, linkage->output_equal_mask, NUM_SCALAR_SLOTS) {
2700 if (!can_optimize_varying(linkage, vec4_slot(i)).deduplicate)
2701 continue;
2702
2703 struct scalar_slot *slot = &linkage->slot[i];
2704 enum var_qualifier qualifier;
2705 gl_varying_slot var_slot = vec4_slot(i);
2706
2707 /* Determine which qualifier this slot has. */
2708 if ((var_slot >= VARYING_SLOT_PATCH0 &&
2709 var_slot <= VARYING_SLOT_PATCH31) ||
2710 var_slot == VARYING_SLOT_TESS_LEVEL_INNER ||
2711 var_slot == VARYING_SLOT_TESS_LEVEL_OUTER)
2712 qualifier = QUAL_PATCH;
2713 else if (linkage->consumer_stage != MESA_SHADER_FRAGMENT)
2714 qualifier = QUAL_VAR_FLAT;
2715 else
2716 qualifier = get_input_qualifier(linkage, i);
2717
2718 if (qualifier == QUAL_SKIP)
2719 continue;
2720
2721 struct hash_table **table = &tables[qualifier];
2722 if (!*table)
2723 *table = _mesa_pointer_hash_table_create(NULL);
2724
2725 nir_instr *value = slot->producer.value;
2726
2727 struct hash_entry *entry = _mesa_hash_table_search(*table, value);
2728 if (!entry) {
2729 _mesa_hash_table_insert(*table, value, (void*)(uintptr_t)i);
2730 continue;
2731 }
2732
2733 /* We've found a duplicate. Redirect loads and remove stores. */
2734 struct scalar_slot *found_slot = &linkage->slot[(uintptr_t)entry->data];
2735 nir_intrinsic_instr *store =
2736 list_first_entry(&found_slot->producer.stores,
2737 struct list_node, head)->instr;
2738 nir_io_semantics sem = nir_intrinsic_io_semantics(store);
2739 unsigned component = nir_intrinsic_component(store);
2740
2741 /* Redirect loads. */
2742 for (unsigned list_index = 0; list_index < 2; list_index++) {
2743 struct list_head *src_loads = list_index ? &slot->producer.loads :
2744 &slot->consumer.loads;
2745 struct list_head *dst_loads = list_index ? &found_slot->producer.loads :
2746 &found_slot->consumer.loads;
2747 bool has_progress = !list_is_empty(src_loads);
2748
2749 list_for_each_entry(struct list_node, iter, src_loads, head) {
2750 nir_intrinsic_instr *loadi = iter->instr;
2751
2752 nir_intrinsic_set_io_semantics(loadi, sem);
2753 nir_intrinsic_set_component(loadi, component);
2754
2755 /* We also need to set the base to match the duplicate load, so
2756 * that CSE can eliminate it.
2757 */
2758 if (!list_is_empty(dst_loads)) {
2759 struct list_node *first =
2760 list_first_entry(dst_loads, struct list_node, head);
2761 nir_intrinsic_set_base(loadi, nir_intrinsic_base(first->instr));
2762 } else {
2763 /* Use the base of the found store if there are no loads (it can
2764 * only happen with TCS).
2765 */
2766 assert(list_index == 0);
2767 nir_intrinsic_set_base(loadi, nir_intrinsic_base(store));
2768 }
2769 }
2770
2771 if (has_progress) {
2772 /* Move the redirected loads to the found slot, so that compaction
2773 * can find them.
2774 */
2775 list_splicetail(src_loads, dst_loads);
2776 list_inithead(src_loads);
2777
2778 *progress |= list_index ? nir_progress_producer :
2779 nir_progress_consumer;
2780 }
2781 }
2782
2783 /* Remove all duplicated stores now that loads have been redirected. */
2784 remove_all_stores_and_clear_slot(linkage, i, progress);
2785 }
2786
2787 for (unsigned i = 0; i < ARRAY_SIZE(tables); i++)
2788 _mesa_hash_table_destroy(tables[i], NULL);
2789 }
2790
2791 /******************************************************************
2792 * FIND OPEN-CODED TES INPUT INTERPOLATION
2793 ******************************************************************/
2794
2795 static nir_alu_instr *
2796 get_single_use_as_alu(nir_def *def)
2797 {
2798 /* Only 1 use allowed. */
2799 if (!list_is_singular(&def->uses))
2800 return NULL;
2801
2802 nir_instr *instr =
2803 nir_src_parent_instr(list_first_entry(&def->uses, nir_src, use_link));
2804 if (instr->type != nir_instr_type_alu)
2805 return NULL;
2806
2807 return nir_instr_as_alu(instr);
2808 }
2809
2810 static nir_alu_instr *
2811 check_tes_input_load_get_single_use_alu(nir_intrinsic_instr *load,
2812 unsigned *vertex_index,
2813 unsigned *vertices_used,
2814 unsigned max_vertices)
2815 {
2816 if (load->intrinsic != nir_intrinsic_load_per_vertex_input)
2817 return NULL;
2818
2819 /* Check the vertex index. Each vertex can be loaded only once. */
2820 if (!nir_src_is_const(load->src[0]))
2821 return NULL;
2822
2823 *vertex_index = nir_src_as_uint(load->src[0]);
2824 if (*vertex_index >= max_vertices ||
2825 *vertices_used & BITFIELD_BIT(*vertex_index))
2826 return NULL;
2827
2828 *vertices_used |= BITFIELD_BIT(*vertex_index);
2829
2830 return get_single_use_as_alu(&load->def);
2831 }
2832
2833 static bool
2834 gather_fmul_tess_coord(nir_intrinsic_instr *load, nir_alu_instr *fmul,
2835 unsigned vertex_index, unsigned *tess_coord_swizzle,
2836 unsigned *tess_coord_used, nir_def **load_tess_coord)
2837 {
2838 unsigned other_src = fmul->src[0].src.ssa == &load->def;
2839 nir_instr *other_instr = fmul->src[other_src].src.ssa->parent_instr;
2840
2841 assert(fmul->src[!other_src].swizzle[0] == 0);
2842
2843 if (!is_sysval(other_instr, SYSTEM_VALUE_TESS_COORD))
2844 return false;
2845
2846 unsigned tess_coord_component = fmul->src[other_src].swizzle[0];
2847 /* Each tesscoord component can be used only once. */
2848 if (*tess_coord_used & BITFIELD_BIT(tess_coord_component))
2849 return false;
2850
2851 *tess_coord_swizzle |= tess_coord_component << (4 * vertex_index);
2852 *tess_coord_used |= BITFIELD_BIT(tess_coord_component);
2853 *load_tess_coord = &nir_instr_as_intrinsic(other_instr)->def;
2854 return true;
2855 }
2856
2857 /**
2858 * Find interpolation of the form:
2859 * input[0].slot * TessCoord.a +
2860 * input[1].slot * TessCoord.b +
2861 * input[2].slot * TessCoord.c;
2862 *
2863 * a,b,c can be any of x,y,z, but each can occur only once.
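 *
 * For illustration only, a hypothetical TES source that typically lowers
 * to this pattern after scalarization:
 *
 *    result = gl_TessCoord.x * in_val[0] +
 *             gl_TessCoord.y * in_val[1] +
 *             gl_TessCoord.z * in_val[2];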
2864 */
2865 static bool
2866 find_tes_triangle_interp_3fmul_2fadd(struct linkage_info *linkage, unsigned i)
2867 {
2868 struct scalar_slot *slot = &linkage->slot[i];
2869 unsigned vertices_used = 0;
2870 unsigned tess_coord_used = 0;
2871 unsigned tess_coord_swizzle = 0;
2872 unsigned num_fmuls = 0, num_fadds = 0;
2873 nir_alu_instr *fadds[2];
2874 nir_def *load_tess_coord = NULL;
2875
2876 /* Find 3 multiplications by TessCoord and their uses, which must be
2877 * fadds.
2878 */
2879 list_for_each_entry(struct list_node, iter, &slot->consumer.loads, head) {
2880 unsigned vertex_index;
2881 nir_alu_instr *fmul =
2882 check_tes_input_load_get_single_use_alu(iter->instr, &vertex_index,
2883 &vertices_used, 3);
2884 /* At most 3 loads are expected. Also reject exact ops because we
2885 * are going to apply an inexact transformation to them.
2886 */
2887 if (!fmul || fmul->op != nir_op_fmul || fmul->exact || num_fmuls == 3 ||
2888 !gather_fmul_tess_coord(iter->instr, fmul, vertex_index,
2889 &tess_coord_swizzle, &tess_coord_used,
2890 &load_tess_coord))
2891 return false;
2892
2893 num_fmuls++;
2894
2895 /* The multiplication must only be used by fadd. Also reject exact ops.
2896 */
2897 nir_alu_instr *fadd = get_single_use_as_alu(&fmul->def);
2898 if (!fadd || fadd->op != nir_op_fadd || fadd->exact)
2899 return false;
2900
2901 /* The 3 fmuls must only be used by 2 fadds. */
2902 unsigned i;
2903 for (i = 0; i < num_fadds; i++) {
2904 if (fadds[i] == fadd)
2905 break;
2906 }
2907 if (i == num_fadds) {
2908 if (num_fadds == 2)
2909 return false;
2910
2911 fadds[num_fadds++] = fadd;
2912 }
2913 }
2914
2915 if (num_fmuls != 3 || num_fadds != 2)
2916 return false;
2917
2918 assert(tess_coord_used == 0x7);
2919
2920 /* We have found that the only uses of the 3 fmuls are 2 fadds, which
2921 * implies that at least 2 fmuls are used by the same fadd.
2922 *
2923 * Check that 1 fadd is used by the other fadd, which can only be
2924 * the result of the TessCoord interpolation.
2925 */
2926 for (unsigned i = 0; i < 2; i++) {
2927 if (get_single_use_as_alu(&fadds[i]->def) == fadds[!i]) {
2928 switch (tess_coord_swizzle) {
2929 case 0x210:
2930 slot->consumer.tes_interp_load = fadds[!i];
2931 slot->consumer.tes_interp_mode = FLAG_INTERP_TES_TRIANGLE_UVW;
2932 slot->consumer.tes_load_tess_coord = load_tess_coord;
2933 return true;
2934
2935 case 0x102:
2936 slot->consumer.tes_interp_load = fadds[!i];
2937 slot->consumer.tes_interp_mode = FLAG_INTERP_TES_TRIANGLE_WUV;
2938 slot->consumer.tes_load_tess_coord = load_tess_coord;
2939 return true;
2940
2941 default:
2942 return false;
2943 }
2944 }
2945 }
2946
2947 return false;
2948 }
2949
2950 /**
2951 * Find interpolation of the form:
2952 * fma(input[0].slot, TessCoord.a,
2953 * fma(input[1].slot, TessCoord.b,
2954 * input[2].slot * TessCoord.c))
2955 *
2956 * a,b,c can be any of x,y,z, but each can occur only once.
2957 */
2958 static bool
2959 find_tes_triangle_interp_1fmul_2ffma(struct linkage_info *linkage, unsigned i)
2960 {
2961 struct scalar_slot *slot = &linkage->slot[i];
2962 unsigned vertices_used = 0;
2963 unsigned tess_coord_used = 0;
2964 unsigned tess_coord_swizzle = 0;
2965 unsigned num_fmuls = 0, num_ffmas = 0;
2966 nir_alu_instr *ffmas[2], *fmul = NULL;
2967 nir_def *load_tess_coord = NULL;
2968
2969 list_for_each_entry(struct list_node, iter, &slot->consumer.loads, head) {
2970 unsigned vertex_index;
2971 nir_alu_instr *alu =
2972 check_tes_input_load_get_single_use_alu(iter->instr, &vertex_index,
2973 &vertices_used, 3);
2974
2975 /* Reject exact ops because we are going to do an inexact transformation
2976 * with it.
2977 */
2978 if (!alu || (alu->op != nir_op_fmul && alu->op != nir_op_ffma) ||
2979 alu->exact ||
2980 !gather_fmul_tess_coord(iter->instr, alu, vertex_index,
2981 &tess_coord_swizzle, &tess_coord_used,
2982 &load_tess_coord))
2983 return false;
2984
2985 /* The multiplication must only be used by ffma. */
2986 if (alu->op == nir_op_fmul) {
2987 nir_alu_instr *ffma = get_single_use_as_alu(&alu->def);
2988 if (!ffma || ffma->op != nir_op_ffma)
2989 return false;
2990
2991 if (num_fmuls == 1)
2992 return false;
2993
2994 fmul = alu;
2995 num_fmuls++;
2996 } else {
2997 if (num_ffmas == 2)
2998 return false;
2999
3000 ffmas[num_ffmas++] = alu;
3001 }
3002 }
3003
3004 if (num_fmuls != 1 || num_ffmas != 2)
3005 return false;
3006
3007 assert(tess_coord_used == 0x7);
3008
3009 /* We have found that the fmul has only 1 use and that use is an ffma,
3010 * and there are 2 ffmas. Fail if neither ffma uses the fmul.
3011 */
3012 if (ffmas[0]->src[2].src.ssa != &fmul->def &&
3013 ffmas[1]->src[2].src.ssa != &fmul->def)
3014 return false;
3015
3016 /* If one ffma is using the other ffma, it's guaranteed to be src[2]. */
3017 for (unsigned i = 0; i < 2; i++) {
3018 if (get_single_use_as_alu(&ffmas[i]->def) == ffmas[!i]) {
3019 switch (tess_coord_swizzle) {
3020 case 0x210:
3021 slot->consumer.tes_interp_load = ffmas[!i];
3022 slot->consumer.tes_interp_mode = FLAG_INTERP_TES_TRIANGLE_UVW;
3023 slot->consumer.tes_load_tess_coord = load_tess_coord;
3024 return true;
3025
3026 case 0x102:
3027 slot->consumer.tes_interp_load = ffmas[!i];
3028 slot->consumer.tes_interp_mode = FLAG_INTERP_TES_TRIANGLE_WUV;
3029 slot->consumer.tes_load_tess_coord = load_tess_coord;
3030 return true;
3031
3032 default:
3033 return false;
3034 }
3035 }
3036 }
3037
3038 return false;
3039 }
3040
3041 static void
3042 find_open_coded_tes_input_interpolation(struct linkage_info *linkage)
3043 {
3044 if (linkage->consumer_stage != MESA_SHADER_TESS_EVAL)
3045 return;
3046
3047 unsigned i;
3048 BITSET_FOREACH_SET(i, linkage->flat32_mask, NUM_SCALAR_SLOTS) {
3049 if (vec4_slot(i) >= VARYING_SLOT_PATCH0 &&
3050 vec4_slot(i) <= VARYING_SLOT_PATCH31)
3051 continue;
3052 if (find_tes_triangle_interp_3fmul_2fadd(linkage, i))
3053 continue;
3054 if (find_tes_triangle_interp_1fmul_2ffma(linkage, i))
3055 continue;
3056 }
3057
3058 BITSET_FOREACH_SET(i, linkage->flat16_mask, NUM_SCALAR_SLOTS) {
3059 if (vec4_slot(i) >= VARYING_SLOT_PATCH0 &&
3060 vec4_slot(i) <= VARYING_SLOT_PATCH31)
3061 continue;
3062 if (find_tes_triangle_interp_3fmul_2fadd(linkage, i))
3063 continue;
3064 if (find_tes_triangle_interp_1fmul_2ffma(linkage, i))
3065 continue;
3066 }
3067 }
3068
3069 /******************************************************************
3070 * BACKWARD INTER-SHADER CODE MOTION
3071 ******************************************************************/
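
/* A rough sketch of what this optimization does, using a hypothetical
 * VS -> FS pair (the exact IR is simplified):
 *
 *    VS:  store_output(x, VAR0.x);
 *    FS:  a = load_interpolated_input(VAR0.x);
 *         b = a * 2.0 + 0.5;    <- movable post-dominator of the load
 *
 * becomes:
 *
 *    VS:  store_output(x * 2.0 + 0.5, VAR0.x);
 *    FS:  b = load_interpolated_input(VAR0.x);
 *
 * This is only legal for ALU instructions that can be moved across
 * interpolation (see can_move_alu_across_interp); here the multiplication
 * is allowed because 2.0 is a convergent (constant) operand.
 */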
3072
3073 #define NEED_UPDATE_MOVABLE_FLAGS(instr) \
3074 (!((instr)->pass_flags & (FLAG_MOVABLE | FLAG_UNMOVABLE)))
3075
3076 #define GET_SRC_INTERP(alu, i) \
3077 ((alu)->src[i].src.ssa->parent_instr->pass_flags & FLAG_INTERP_MASK)
3078
3079 static bool
3080 can_move_alu_across_interp(struct linkage_info *linkage, nir_alu_instr *alu)
3081 {
3082 /* Exact ALUs can't be moved across interpolation. */
3083 if (alu->exact)
3084 return false;
3085
3086 /* Interpolation converts Infs to NaNs. If we turn the result of an ALU
3087 * instruction into a new interpolated input, that result now gets the
3088 * Inf-to-NaN conversion, while the interpolated values it sourced lose
3089 * theirs. We can't do that if Infs and NaNs must be preserved.
3090 */
3091 if (preserve_infs_nans(linkage->consumer_builder.shader, alu->def.bit_size))
3092 return false;
3093
3094 switch (alu->op) {
3095 /* Always legal if the sources are interpolated identically because:
3096 * interp(x, i, j) + interp(y, i, j) = interp(x + y, i, j)
3097 * interp(x, i, j) + convergent_expr = interp(x + convergent_expr, i, j)
3098 */
3099 case nir_op_fadd:
3100 case nir_op_fsub:
3101 /* This is the same as multiplying by -1, which is always legal, see fmul.
3102 */
3103 case nir_op_fneg:
3104 case nir_op_mov:
3105 return true;
3106
3107 /* At least one side of the multiplication must be convergent because this
3108 * is the only equation with multiplication that is true:
3109 * interp(x, i, j) * convergent_expr = interp(x * convergent_expr, i, j)
3110 */
3111 case nir_op_fmul:
3112 case nir_op_fmulz:
3113 case nir_op_ffma:
3114 case nir_op_ffmaz:
3115 return GET_SRC_INTERP(alu, 0) == FLAG_INTERP_CONVERGENT ||
3116 GET_SRC_INTERP(alu, 1) == FLAG_INTERP_CONVERGENT;
3117
3118 case nir_op_fdiv:
3119 /* The right side must be convergent, which then follows the fmul rule.
3120 */
3121 return GET_SRC_INTERP(alu, 1) == FLAG_INTERP_CONVERGENT;
3122
3123 case nir_op_flrp:
3124 /* Using the same rule as fmul. */
3125 return (GET_SRC_INTERP(alu, 0) == FLAG_INTERP_CONVERGENT &&
3126 GET_SRC_INTERP(alu, 1) == FLAG_INTERP_CONVERGENT) ||
3127 GET_SRC_INTERP(alu, 2) == FLAG_INTERP_CONVERGENT;
3128
3129 default:
3130 /* Moving other ALU instructions across interpolation is illegal. */
3131 return false;
3132 }
3133 }
3134
3135 /* Determine whether an instruction is movable from the consumer to
3136 * the producer. Also determine which interpolation modes each ALU instruction
3137 * should use if its value was promoted to a new input.
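 *
 * For example (hypothetical sources): fadd(persp_pixel_input, constant)
 * inherits FLAG_INTERP_PERSP_PIXEL because constants are convergent, while
 * fadd(persp_pixel_input, linear_centroid_input) is marked unmovable
 * because its sources have conflicting interpolation flags.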
3138 */
3139 static void
3140 update_movable_flags(struct linkage_info *linkage, nir_instr *instr)
3141 {
3142 /* This function shouldn't be called more than once for each instruction
3143 * to minimize recursive calling.
3144 */
3145 assert(NEED_UPDATE_MOVABLE_FLAGS(instr));
3146
3147 switch (instr->type) {
3148 case nir_instr_type_undef:
3149 case nir_instr_type_load_const:
3150 /* Treat constants as convergent, which means compatible with both flat
3151 * and non-flat inputs.
3152 */
3153 instr->pass_flags |= FLAG_MOVABLE | FLAG_INTERP_CONVERGENT;
3154 return;
3155
3156 case nir_instr_type_alu: {
3157 nir_alu_instr *alu = nir_instr_as_alu(instr);
3158 unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
3159 unsigned alu_interp;
3160
3161 /* Make vector ops unmovable. They are technically movable but more
3162 * complicated, and NIR should be scalarized for this pass anyway.
3163 * The only remaining vector ops should be vecN for intrinsic sources.
3164 */
3165 if (alu->def.num_components > 1) {
3166 instr->pass_flags |= FLAG_UNMOVABLE;
3167 return;
3168 }
3169
3170 alu_interp = FLAG_INTERP_CONVERGENT;
3171
3172 for (unsigned i = 0; i < num_srcs; i++) {
3173 nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;
3174
3175 if (NEED_UPDATE_MOVABLE_FLAGS(src_instr))
3176 update_movable_flags(linkage, src_instr);
3177
3178 if (src_instr->pass_flags & FLAG_UNMOVABLE) {
3179 instr->pass_flags |= FLAG_UNMOVABLE;
3180 return;
3181 }
3182
3183 /* Determine which interpolation mode this ALU instruction should
3184 * use if it was promoted to a new input.
3185 */
3186 unsigned src_interp = src_instr->pass_flags & FLAG_INTERP_MASK;
3187
3188 if (alu_interp == src_interp ||
3189 src_interp == FLAG_INTERP_CONVERGENT) {
3190 /* Nothing to do. */
3191 } else if (alu_interp == FLAG_INTERP_CONVERGENT) {
3192 alu_interp = src_interp;
3193 } else {
3194 assert(alu_interp != FLAG_INTERP_CONVERGENT &&
3195 src_interp != FLAG_INTERP_CONVERGENT &&
3196 alu_interp != src_interp);
3197 /* The ALU instruction's sources have conflicting interpolation
3198 * flags, so it can never become a new input.
3199 */
3200 instr->pass_flags |= FLAG_UNMOVABLE;
3201 return;
3202 }
3203 }
3204
3205 /* Check if we can move the ALU instruction across an interpolated
3206 * load into the previous shader.
3207 */
3208 if (alu_interp > FLAG_INTERP_FLAT &&
3209 !can_move_alu_across_interp(linkage, alu)) {
3210 instr->pass_flags |= FLAG_UNMOVABLE;
3211 return;
3212 }
3213
3214 instr->pass_flags |= FLAG_MOVABLE | alu_interp;
3215 return;
3216 }
3217
3218 case nir_instr_type_intrinsic: {
3219 /* Movable input loads already have FLAG_MOVABLE on them.
3220 * Unmovable input loads skipped by initialization get UNMOVABLE here.
3221 * (e.g. colors, texcoords)
3222 *
3223 * The only other movable intrinsic is load_deref for uniforms and UBOs.
3224 * Other intrinsics are not movable.
3225 */
3226 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
3227
3228 if (intr->intrinsic == nir_intrinsic_load_deref) {
3229 nir_instr *deref = intr->src[0].ssa->parent_instr;
3230
3231 if (NEED_UPDATE_MOVABLE_FLAGS(deref))
3232 update_movable_flags(linkage, deref);
3233
3234 instr->pass_flags |= deref->pass_flags;
3235 return;
3236 }
3237
3238 instr->pass_flags |= FLAG_UNMOVABLE;
3239 return;
3240 }
3241
3242 case nir_instr_type_deref: {
3243 if (!can_move_deref_between_shaders(linkage, instr)) {
3244 instr->pass_flags |= FLAG_UNMOVABLE;
3245 return;
3246 }
3247
3248 nir_deref_instr *deref = nir_instr_as_deref(instr);
3249 nir_deref_instr *parent = nir_deref_instr_parent(deref);
3250
3251 if (parent) {
3252 if (NEED_UPDATE_MOVABLE_FLAGS(&parent->instr))
3253 update_movable_flags(linkage, &parent->instr);
3254
3255 if (parent->instr.pass_flags & FLAG_UNMOVABLE) {
3256 instr->pass_flags |= FLAG_UNMOVABLE;
3257 return;
3258 }
3259 }
3260
3261 switch (deref->deref_type) {
3262 case nir_deref_type_var:
3263 instr->pass_flags |= FLAG_MOVABLE;
3264 return;
3265
3266 case nir_deref_type_struct:
3267 assert(parent->instr.pass_flags & FLAG_MOVABLE);
3268 instr->pass_flags |= parent->instr.pass_flags;
3269 return;
3270
3271 case nir_deref_type_array: {
3272 nir_instr *index = deref->arr.index.ssa->parent_instr;
3273
3274 if (NEED_UPDATE_MOVABLE_FLAGS(index))
3275 update_movable_flags(linkage, index);
3276
3277 /* Integer array indices should be movable only if they are
3278 * convergent or flat.
3279 */
3280 ASSERTED unsigned index_interp = index->pass_flags & FLAG_INTERP_MASK;
3281 assert(index->pass_flags & FLAG_UNMOVABLE ||
3282 (index_interp == FLAG_INTERP_CONVERGENT ||
3283 index_interp == FLAG_INTERP_FLAT));
3284
3285 if (parent) {
3286 unsigned parent_interp = parent->instr.pass_flags & FLAG_INTERP_MASK;
3287
3288 /* Check if the interpolation flags are compatible. */
3289 if (parent_interp != FLAG_INTERP_CONVERGENT &&
3290 index_interp != FLAG_INTERP_CONVERGENT &&
3291 parent_interp != index_interp) {
3292 instr->pass_flags |= FLAG_UNMOVABLE;
3293 return;
3294 }
3295
3296 /* Pick the one that isn't convergent because convergent inputs
3297 * can be in expressions with any other qualifier.
3298 */
3299 if (parent_interp == FLAG_INTERP_CONVERGENT)
3300 instr->pass_flags |= index->pass_flags;
3301 else
3302 instr->pass_flags |= parent->instr.pass_flags;
3303 } else {
3304 instr->pass_flags |= index->pass_flags;
3305 }
3306 return;
3307 }
3308
3309 default:
3310 instr->pass_flags |= FLAG_UNMOVABLE;
3311 return;
3312 }
3313 }
3314
3315 default:
3316 instr->pass_flags |= FLAG_UNMOVABLE;
3317 return;
3318 }
3319 }
3320
3321 /* Gather the input loads used by the post-dominator using DFS. */
3322 static void
3323 gather_used_input_loads(nir_instr *instr,
3324 nir_intrinsic_instr *loads[NUM_SCALAR_SLOTS],
3325 unsigned *num_loads)
3326 {
3327 switch (instr->type) {
3328 case nir_instr_type_undef:
3329 case nir_instr_type_load_const:
3330 return;
3331
3332 case nir_instr_type_alu: {
3333 nir_alu_instr *alu = nir_instr_as_alu(instr);
3334 unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
3335
3336 for (unsigned i = 0; i < num_srcs; i++) {
3337 gather_used_input_loads(alu->src[i].src.ssa->parent_instr,
3338 loads, num_loads);
3339 }
3340 return;
3341 }
3342
3343 case nir_instr_type_intrinsic: {
3344 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
3345
3346 switch (intr->intrinsic) {
3347 case nir_intrinsic_load_tess_coord:
3348 return;
3349
3350 case nir_intrinsic_load_deref:
3351 gather_used_input_loads(intr->src[0].ssa->parent_instr,
3352 loads, num_loads);
3353 return;
3354
3355 case nir_intrinsic_load_input:
3356 case nir_intrinsic_load_per_vertex_input:
3357 case nir_intrinsic_load_interpolated_input:
3358 if (!(intr->instr.pass_flags & FLAG_GATHER_LOADS_VISITED)) {
3359 assert(*num_loads < NUM_SCALAR_SLOTS*8);
3360 loads[(*num_loads)++] = intr;
3361 intr->instr.pass_flags |= FLAG_GATHER_LOADS_VISITED;
3362 }
3363 return;
3364
3365 default:
3366 printf("%u\n", intr->intrinsic);
3367 unreachable("unexpected intrinsic");
3368 }
3369 }
3370
3371 case nir_instr_type_deref: {
3372 nir_deref_instr *deref = nir_instr_as_deref(instr);
3373 nir_deref_instr *parent = nir_deref_instr_parent(deref);
3374
3375 if (parent)
3376 gather_used_input_loads(&parent->instr, loads, num_loads);
3377
3378 switch (deref->deref_type) {
3379 case nir_deref_type_var:
3380 case nir_deref_type_struct:
3381 return;
3382
3383 case nir_deref_type_array:
3384 gather_used_input_loads(deref->arr.index.ssa->parent_instr,
3385 loads, num_loads);
3386 return;
3387
3388 default:
3389 unreachable("unexpected deref type");
3390 }
3391 }
3392
3393 default:
3394 unreachable("unexpected instr type");
3395 }
3396 }
3397
3398 /* Move a post-dominator, which is an ALU opcode, into the previous shader,
3399 * and replace the post-dominator with a new input load.
3400 */
3401 static bool
3402 try_move_postdominator(struct linkage_info *linkage,
3403 struct nir_use_dominance_state *postdom_state,
3404 nir_instr *postdom,
3405 nir_def *load_def,
3406 nir_intrinsic_instr *first_load,
3407 nir_opt_varyings_progress *progress)
3408 {
3409 #define PRINT 0
3410 #if PRINT
3411 printf("Trying to move post-dom: ");
3412 nir_print_instr(postdom, stdout);
3413 puts("");
3414 #endif
3415
3416 /* Gather the input loads used by the post-dominator using DFS. */
3417 nir_intrinsic_instr *loads[NUM_SCALAR_SLOTS*8];
3418 unsigned num_loads = 0;
3419 gather_used_input_loads(postdom, loads, &num_loads);
3420 assert(num_loads && "no loads were gathered");
3421
3422 /* Clear the flag set by gather_used_input_loads. */
3423 for (unsigned i = 0; i < num_loads; i++)
3424 loads[i]->instr.pass_flags &= ~FLAG_GATHER_LOADS_VISITED;
3425
3426 /* For all the loads, the previous shader must have the corresponding
3427 * output stores in the same basic block because we are going to replace
3428 * them with 1 store. Only TCS and GS can have stores of different outputs
3429 * in different blocks.
3430 */
3431 nir_block *block = NULL;
3432
3433 for (unsigned i = 0; i < num_loads; i++) {
3434 unsigned slot_index = intr_get_scalar_16bit_slot(loads[i]);
3435 struct scalar_slot *slot = &linkage->slot[slot_index];
3436
3437 assert(list_is_singular(&slot->producer.stores));
3438 nir_intrinsic_instr *store =
3439 list_first_entry(&slot->producer.stores, struct list_node,
3440 head)->instr;
3441
3442 if (!block) {
3443 block = store->instr.block;
3444 continue;
3445 }
3446 if (block != store->instr.block)
3447 return false;
3448 }
3449
3450 assert(block);
3451
3452 #if PRINT
3453 printf("Post-dom accepted: ");
3454 nir_print_instr(postdom, stdout);
3455 puts("\n");
3456 #endif
3457
3458 /* Determine the scalar slot index of the new varying. It will reuse
3459 * the slot of the load we started from because the load will be
3460 * removed.
3461 */
3462 unsigned final_slot = intr_get_scalar_16bit_slot(first_load);
3463
3464 /* Replace the post-dominator in the consumer with a new input load.
3465 * Since we are reusing the same slot as the first load and it has
3466 * the right interpolation qualifiers, use it as the new load by using
3467 * it in place of the post-dominator.
3468 *
3469 * Boolean post-dominators are upcast in the producer and then downcast
3470 * in the consumer.
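 *
 * For example (hypothetical), if the post-dominator is a 1-bit comparison
 * "x < y", the producer stores b2b32(x < y) into the output and the
 * consumer reconstructs the boolean as "load_input != 0" (nir_ine_imm).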
3471 */
3472 unsigned slot_index = final_slot;
3473 struct scalar_slot *slot = &linkage->slot[slot_index];
3474 nir_builder *b = &linkage->consumer_builder;
3475 b->cursor = nir_after_instr(load_def->parent_instr);
3476 nir_def *postdom_def = nir_instr_def(postdom);
3477 unsigned alu_interp = postdom->pass_flags & FLAG_INTERP_MASK;
3478 nir_def *new_input, *new_tes_loads[3];
3479 BITSET_WORD *mask;
3480
3481 /* Convergent instruction results that are not interpolatable (integer or
3482 * FP64) should not be moved, because compaction can relocate convergent
3483 * varyings to interpolated vec4 slots; the definition of convergent
3484 * varyings implies that they can be interpolated, which doesn't work with
3485 * integer and FP64 values.
3486 *
3487 * Check the result type, and if it's not float and the driver doesn't
3488 * support convergent flat loads from interpolated vec4 slots, don't move
3489 * it.
3490 */
3491 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT &&
3492 alu_interp == FLAG_INTERP_CONVERGENT &&
3493 !linkage->can_mix_convergent_flat_with_interpolated &&
3494 (postdom->type != nir_instr_type_alu ||
3495 (postdom_def->bit_size != 16 && postdom_def->bit_size != 32) ||
3496 !(nir_op_infos[nir_instr_as_alu(postdom)->op].output_type & nir_type_float)))
3497 return false;
3498
3499 /* NIR can't do 1-bit inputs. Convert them to a bigger size. */
3500 assert(postdom_def->bit_size & (1 | 16 | 32));
3501 unsigned new_bit_size = postdom_def->bit_size;
3502
3503 if (new_bit_size == 1) {
3504 assert(alu_interp == FLAG_INTERP_CONVERGENT ||
3505 alu_interp == FLAG_INTERP_FLAT);
3506 /* TODO: We could use 16 bits instead, but that currently fails on AMD.
3507 */
3508 new_bit_size = 32;
3509 }
3510
3511 bool rewrite_convergent_to_flat =
3512 alu_interp == FLAG_INTERP_CONVERGENT &&
3513 linkage->can_mix_convergent_flat_with_interpolated;
3514
3515 /* Create the new input load. This creates a new load (or a series of
3516 * loads in case of open-coded TES interpolation) that's identical to
3517 * the original load(s).
3518 */
3519 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT &&
3520 alu_interp != FLAG_INTERP_FLAT && !rewrite_convergent_to_flat) {
3521 nir_def *baryc = NULL;
3522
3523 /* Determine the barycentric coordinates. */
3524 switch (alu_interp) {
3525 case FLAG_INTERP_PERSP_PIXEL:
3526 case FLAG_INTERP_LINEAR_PIXEL:
3527 baryc = nir_load_barycentric_pixel(b, 32);
3528 break;
3529 case FLAG_INTERP_PERSP_CENTROID:
3530 case FLAG_INTERP_LINEAR_CENTROID:
3531 baryc = nir_load_barycentric_centroid(b, 32);
3532 break;
3533 case FLAG_INTERP_PERSP_SAMPLE:
3534 case FLAG_INTERP_LINEAR_SAMPLE:
3535 baryc = nir_load_barycentric_sample(b, 32);
3536 break;
3537 default:
3538 baryc = first_load->src[0].ssa;
3539 break;
3540 }
3541
3542 if (baryc != first_load->src[0].ssa) {
3543 nir_intrinsic_instr *baryc_i =
3544 nir_instr_as_intrinsic(baryc->parent_instr);
3545
3546 if (alu_interp == FLAG_INTERP_LINEAR_PIXEL ||
3547 alu_interp == FLAG_INTERP_LINEAR_CENTROID ||
3548 alu_interp == FLAG_INTERP_LINEAR_SAMPLE)
3549 nir_intrinsic_set_interp_mode(baryc_i, INTERP_MODE_NOPERSPECTIVE);
3550 else
3551 nir_intrinsic_set_interp_mode(baryc_i, INTERP_MODE_SMOOTH);
3552 }
3553
3554 new_input = nir_load_interpolated_input(
3555 b, 1, new_bit_size, baryc, nir_imm_int(b, 0),
3556 .base = nir_intrinsic_base(first_load),
3557 .component = nir_intrinsic_component(first_load),
3558 .dest_type = nir_alu_type_get_base_type(nir_intrinsic_dest_type(first_load)) |
3559 new_bit_size,
3560 .io_semantics = nir_intrinsic_io_semantics(first_load));
3561
3562 if (alu_interp == FLAG_INTERP_CONVERGENT) {
3563 mask = new_bit_size == 16 ? linkage->convergent16_mask
3564 : linkage->convergent32_mask;
3565 } else if (linkage->has_flexible_interp) {
3566 mask = new_bit_size == 16 ? linkage->interp_fp16_mask
3567 : linkage->interp_fp32_mask;
3568 } else {
3569 /* The index of the qualifier is encoded in alu_interp, so extract it. */
3570 unsigned i = (alu_interp - FLAG_INTERP_PERSP_PIXEL) >> 5;
3571 mask = new_bit_size == 16 ? linkage->interp_fp16_qual_masks[i]
3572 : linkage->interp_fp32_qual_masks[i];
3573 }
3574 } else if (linkage->consumer_stage == MESA_SHADER_TESS_EVAL &&
3575 alu_interp > FLAG_INTERP_FLAT) {
3576 nir_def *zero = nir_imm_int(b, 0);
3577
3578 for (unsigned i = 0; i < 3; i++) {
3579 new_tes_loads[i] =
3580 nir_load_per_vertex_input(b, 1, new_bit_size,
3581 i ? nir_imm_int(b, i) : zero, zero,
3582 .base = nir_intrinsic_base(first_load),
3583 .component = nir_intrinsic_component(first_load),
3584 .dest_type = nir_alu_type_get_base_type(nir_intrinsic_dest_type(first_load)) |
3585 new_bit_size,
3586 .io_semantics = nir_intrinsic_io_semantics(first_load));
3587 }
3588
3589 int remap_uvw[3] = {0, 1, 2};
3590 int remap_wuv[3] = {2, 0, 1};
3591 int *remap;
3592
3593 switch (alu_interp) {
3594 case FLAG_INTERP_TES_TRIANGLE_UVW:
3595 remap = remap_uvw;
3596 break;
3597 case FLAG_INTERP_TES_TRIANGLE_WUV:
3598 remap = remap_wuv;
3599 break;
3600 default:
3601 unreachable("invalid TES interpolation mode");
3602 }
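   /* For example, FLAG_INTERP_TES_TRIANGLE_WUV means the original pattern
    * was input[0]*TessCoord.z + input[1]*TessCoord.x + input[2]*TessCoord.y,
    * so remap_wuv = {2, 0, 1} reproduces the same TessCoord channel per
    * vertex with the rebuilt per-vertex loads.
    */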
3603
3604 nir_def *tesscoord = slot->consumer.tes_load_tess_coord;
3605 nir_def *defs[3];
3606
3607 for (unsigned i = 0; i < 3; i++) {
3608 if (i == 0) {
3609 defs[i] = nir_fmul(b, new_tes_loads[i],
3610 nir_channel(b, tesscoord, remap[i]));
3611 } else {
3612 defs[i] = nir_ffma(b, new_tes_loads[i],
3613 nir_channel(b, tesscoord, remap[i]),
3614 defs[i - 1]);
3615 }
3616 }
3617 new_input = defs[2];
3618
3619 mask = new_bit_size == 16 ? linkage->flat16_mask
3620 : linkage->flat32_mask;
3621 } else {
3622 /* We have to rewrite convergent to flat here and not during compaction
3623 * because compaction adds code to convert Infs to NaNs for
3624 * "load_interpolated_input -> load_input" replacements, which corrupts
3625 * integer data.
3626 */
3627 assert(linkage->consumer_stage != MESA_SHADER_FRAGMENT ||
3628 alu_interp == FLAG_INTERP_FLAT || rewrite_convergent_to_flat);
3629
3630 new_input =
3631 nir_load_input(b, 1, new_bit_size, nir_imm_int(b, 0),
3632 .base = nir_intrinsic_base(first_load),
3633 .component = nir_intrinsic_component(first_load),
3634 .dest_type = nir_alu_type_get_base_type(nir_intrinsic_dest_type(first_load)) |
3635 new_bit_size,
3636 .io_semantics = nir_intrinsic_io_semantics(first_load));
3637
3638 mask = new_bit_size == 16 ? linkage->flat16_mask
3639 : linkage->flat32_mask;
3640
3641 if (rewrite_convergent_to_flat) {
3642 mask = new_bit_size == 16 ? linkage->convergent16_mask
3643 : linkage->convergent32_mask;
3644 }
3645 }
3646
3647 assert(!BITSET_TEST(linkage->no_varying32_mask, slot_index));
3648 assert(!BITSET_TEST(linkage->no_varying16_mask, slot_index));
3649
3650 /* Re-set the category of the new scalar input. This will cause
3651 * the compaction to treat it as a different type, so that it will be moved
3652 * into the vec4 that has compatible interpolation qualifiers.
3653 *
3654 * This shouldn't be done if any of the interp masks are not set, which
3655 * indicates that compaction is disallowed.
3656 */
3657 if (BITSET_TEST(linkage->interp_fp32_mask, slot_index) ||
3658 BITSET_TEST(linkage->interp_fp16_mask, slot_index) ||
3659 BITSET6_TEST_ANY(linkage->interp_fp32_qual_masks, slot_index) ||
3660 BITSET6_TEST_ANY(linkage->interp_fp16_qual_masks, slot_index) ||
3661 BITSET_TEST(linkage->flat32_mask, slot_index) ||
3662 BITSET_TEST(linkage->flat16_mask, slot_index) ||
3663 BITSET_TEST(linkage->convergent32_mask, slot_index) ||
3664 BITSET_TEST(linkage->convergent16_mask, slot_index)) {
3665 BITSET_CLEAR(linkage->interp_fp32_mask, slot_index);
3666 for (unsigned i = 0; i < NUM_INTERP_QUALIFIERS; i++)
3667 BITSET_CLEAR(linkage->interp_fp32_qual_masks[i], slot_index);
3668 BITSET_CLEAR(linkage->interp_fp16_mask, slot_index);
3669 for (unsigned i = 0; i < NUM_INTERP_QUALIFIERS; i++)
3670 BITSET_CLEAR(linkage->interp_fp16_qual_masks[i], slot_index);
3671 BITSET_CLEAR(linkage->flat16_mask, slot_index);
3672 BITSET_CLEAR(linkage->flat32_mask, slot_index);
3673 BITSET_CLEAR(linkage->convergent16_mask, slot_index);
3674 BITSET_CLEAR(linkage->convergent32_mask, slot_index);
3675 BITSET_SET(mask, slot_index);
3676 }
3677
3678 /* Replace the existing load with the new load in the slot. */
3679 if (linkage->consumer_stage == MESA_SHADER_TESS_EVAL &&
3680 alu_interp >= FLAG_INTERP_TES_TRIANGLE_UVW) {
3681 /* For TES, replace all 3 loads. */
3682 unsigned i = 0;
3683 list_for_each_entry(struct list_node, iter, &slot->consumer.loads,
3684 head) {
3685 assert(i < 3);
3686 iter->instr = nir_instr_as_intrinsic(new_tes_loads[i]->parent_instr);
3687 i++;
3688 }
3689
3690 assert(i == 3);
3691 assert(postdom_def->bit_size != 1);
3692
3693 slot->consumer.tes_interp_load =
3694 nir_instr_as_alu(new_input->parent_instr);
3695 } else {
3696 assert(list_is_singular(&slot->consumer.loads));
3697 list_first_entry(&slot->consumer.loads, struct list_node, head)->instr =
3698 nir_instr_as_intrinsic(new_input->parent_instr);
3699
3700 /* The input is a bigger type even if the post-dominator is boolean. */
3701 if (postdom_def->bit_size == 1)
3702 new_input = nir_ine_imm(b, new_input, 0);
3703 }
3704
3705 nir_def_rewrite_uses(postdom_def, new_input);
3706
3707 /* Clone the post-dominator at the end of the block in the producer
3708 * where the output stores are.
3709 */
3710 b = &linkage->producer_builder;
3711 b->cursor = nir_after_block_before_jump(block);
3712 nir_def *producer_clone = clone_ssa(linkage, b, postdom_def);
3713
3714 /* Boolean post-dominators are upcast in the producer because we can't
3715 * use 1-bit outputs.
3716 */
3717 if (producer_clone->bit_size == 1)
3718 producer_clone = nir_b2bN(b, producer_clone, new_bit_size);
3719
3720 /* Move the existing store to the end of the block and rewrite it to use
3721 * the post-dominator result.
3722 */
3723 nir_intrinsic_instr *store =
3724 list_first_entry(&linkage->slot[final_slot].producer.stores,
3725 struct list_node, head)->instr;
3726 nir_instr_move(b->cursor, &store->instr);
3727 if (nir_src_bit_size(store->src[0]) != producer_clone->bit_size)
3728 nir_intrinsic_set_src_type(store, nir_alu_type_get_base_type(nir_intrinsic_src_type(store)) |
3729 producer_clone->bit_size);
3730 nir_src_rewrite(&store->src[0], producer_clone);
3731
3732 /* Remove all loads and stores that we are replacing from the producer
3733 * and consumer.
3734 */
3735 for (unsigned i = 0; i < num_loads; i++) {
3736 unsigned slot_index = intr_get_scalar_16bit_slot(loads[i]);
3737
3738 if (slot_index == final_slot) {
3739 /* Keep the load and store that we reused. */
3740 continue;
3741 }
3742
3743 /* Remove loads and stores that are dead after the code motion. Only
3744 * those loads that are post-dominated by the post-dominator are dead.
3745 */
3746 struct scalar_slot *slot = &linkage->slot[slot_index];
3747 nir_instr *load;
3748
3749 if (slot->consumer.tes_interp_load) {
3750 load = &slot->consumer.tes_interp_load->instr;
3751
3752 /* With interpolated TES loads, we get here 3 times, once for each
3753 * per-vertex load. Skip this if we've been here before.
3754 */
3755 if (list_is_empty(&slot->producer.stores)) {
3756 assert(list_is_empty(&slot->consumer.loads));
3757 continue;
3758 }
3759 } else {
3760 assert(list_is_singular(&slot->consumer.loads));
3761 load = &list_first_entry(&slot->consumer.loads,
3762 struct list_node, head)->instr->instr;
3763 }
3764
3765 if (nir_instr_dominates_use(postdom_state, postdom, load)) {
3766 list_inithead(&slot->consumer.loads);
3767
3768 /* Remove stores. (transform feedback is allowed here, just not
3769 * in final_slot)
3770 */
3771 remove_all_stores_and_clear_slot(linkage, slot_index, progress);
3772 } else {
3773 /* If a load has 2 uses and one of those uses is moved into the previous
3774 * shader, making that "use" dead, the load and its associated store
3775 * can't be removed because there is still one use remaining. However,
3776 * there are actually 2 uses remaining because the use that is dead isn't
3777 * removed from NIR, but is left dangling there.
3778 *
3779 * When we run this optimization again and make the second use dead,
3780 * which makes the load dead, the output store in the producer isn't removed
3781 * because the post-dominator of the second use doesn't post-dominate
3782 * the load because we left the first use dangling there.
3783 *
3784 * To fix that, we could run DCE, but that would be costly because we would
3785 * need to re-gather all IO. Instead, remove dead uses by replacing them
3786 * with undef here, so that when this code motion pass is entered again,
3787 * the load has its number of uses reduced and the corresponding output store
3788 * will be removed by the code above.
3789 */
3790 nir_foreach_use_safe(src, nir_instr_def(load)) {
3791 if (nir_instr_dominates_use(postdom_state, postdom,
3792 nir_src_parent_instr(src))) {
3793 nir_src_rewrite(src, nir_undef(&linkage->consumer_builder,
3794 src->ssa->num_components,
3795 src->ssa->bit_size));
3796 }
3797 }
3798 }
3799 }
3800
3801 *progress |= nir_progress_producer | nir_progress_consumer;
3802 return true;
3803 }
3804
3805 static bool
3806 backward_inter_shader_code_motion(struct linkage_info *linkage,
3807 nir_opt_varyings_progress *progress)
3808 {
3809 /* These producers are not supported. The description at the beginning
3810 * suggests a possible workaround.
3811 */
3812 if (linkage->producer_stage == MESA_SHADER_GEOMETRY ||
3813 linkage->producer_stage == MESA_SHADER_MESH ||
3814 linkage->producer_stage == MESA_SHADER_TASK)
3815 return false;
3816
3817 /* Clear pass_flags. */
3818 nir_shader_clear_pass_flags(linkage->consumer_builder.shader);
3819
3820 /* Gather inputs that can be moved into the previous shader. These are only
3821 * checked for the basic constraints for movability.
3822 */
3823 struct {
3824 nir_def *def;
3825 nir_intrinsic_instr *first_load;
3826 } movable_loads[NUM_SCALAR_SLOTS];
3827 unsigned num_movable_loads = 0;
3828 unsigned i;
3829
3830 BITSET_FOREACH_SET(i, linkage->output_equal_mask, NUM_SCALAR_SLOTS) {
3831 if (!can_optimize_varying(linkage,
3832 vec4_slot(i)).inter_shader_code_motion)
3833 continue;
3834
3835 struct scalar_slot *slot = &linkage->slot[i];
3836
3837 assert(!list_is_empty(&slot->producer.stores));
3838 assert(!is_interpolated_texcoord(linkage, i));
3839 assert(!is_interpolated_color(linkage, i));
3840
3841 /* Disallow producer loads. */
3842 if (!list_is_empty(&slot->producer.loads))
3843 continue;
3844
3845 /* There should be only 1 store per output. */
3846 if (!list_is_singular(&slot->producer.stores))
3847 continue;
3848
3849 nir_def *load_def = NULL;
3850 nir_intrinsic_instr *load =
3851 list_first_entry(&slot->consumer.loads, struct list_node,
3852 head)->instr;
3853
3854 nir_intrinsic_instr *store =
3855 list_first_entry(&slot->producer.stores, struct list_node,
3856 head)->instr;
3857
3858 /* Set interpolation flags.
3859 * Handle interpolated TES loads first because they are special.
3860 */
3861 if (linkage->consumer_stage == MESA_SHADER_TESS_EVAL &&
3862 slot->consumer.tes_interp_load) {
3863 if (linkage->producer_stage == MESA_SHADER_VERTEX) {
3864 /* VS -> TES has no constraints on VS stores. */
3865 load_def = &slot->consumer.tes_interp_load->def;
3866 load_def->parent_instr->pass_flags |= FLAG_ALU_IS_TES_INTERP_LOAD |
3867 slot->consumer.tes_interp_mode;
3868 } else {
3869 assert(linkage->producer_stage == MESA_SHADER_TESS_CTRL);
3870 assert(store->intrinsic == nir_intrinsic_store_per_vertex_output);
3871
3872 /* The vertex index of the store must be InvocationID. */
3873 if (is_sysval(store->src[1].ssa->parent_instr,
3874 SYSTEM_VALUE_INVOCATION_ID)) {
3875 load_def = &slot->consumer.tes_interp_load->def;
3876 load_def->parent_instr->pass_flags |= FLAG_ALU_IS_TES_INTERP_LOAD |
3877 slot->consumer.tes_interp_mode;
3878 } else {
3879 continue;
3880 }
3881 }
3882 } else {
3883 /* Allow only 1 load per input. CSE should be run before this. */
3884 if (!list_is_singular(&slot->consumer.loads))
3885 continue;
3886
3887 /* This can only be TCS -> TES, which is handled above and rejected
3888 * otherwise.
3889 */
3890 if (store->intrinsic == nir_intrinsic_store_per_vertex_output) {
3891 assert(linkage->producer_stage == MESA_SHADER_TESS_CTRL);
3892 continue;
3893 }
3894
3895 /* TODO: handle load_per_vertex_input for TCS and GS.
3896 * TES can also occur here if tes_interp_load is NULL.
3897 */
3898 if (load->intrinsic == nir_intrinsic_load_per_vertex_input)
3899 continue;
3900
3901 load_def = &load->def;
3902
3903 switch (load->intrinsic) {
3904 case nir_intrinsic_load_interpolated_input: {
3905 assert(linkage->consumer_stage == MESA_SHADER_FRAGMENT);
3906 nir_intrinsic_instr *baryc =
3907 nir_instr_as_intrinsic(load->src[0].ssa->parent_instr);
3908 nir_intrinsic_op op = baryc->intrinsic;
3909 enum glsl_interp_mode interp = nir_intrinsic_interp_mode(baryc);
3910 bool linear = interp == INTERP_MODE_NOPERSPECTIVE;
3911 bool convergent = BITSET_TEST(linkage->convergent32_mask, i) ||
3912 BITSET_TEST(linkage->convergent16_mask, i);
3913
3914 assert(interp == INTERP_MODE_NONE ||
3915 interp == INTERP_MODE_SMOOTH ||
3916 interp == INTERP_MODE_NOPERSPECTIVE);
3917
3918 if (convergent) {
3919 load->instr.pass_flags |= FLAG_INTERP_CONVERGENT;
3920 } else if (op == nir_intrinsic_load_barycentric_pixel) {
3921 load->instr.pass_flags |= linear ? FLAG_INTERP_LINEAR_PIXEL
3922 : FLAG_INTERP_PERSP_PIXEL;
3923 } else if (op == nir_intrinsic_load_barycentric_centroid) {
3924 load->instr.pass_flags |= linear ? FLAG_INTERP_LINEAR_CENTROID
3925 : FLAG_INTERP_PERSP_CENTROID;
3926 } else if (op == nir_intrinsic_load_barycentric_sample) {
3927 load->instr.pass_flags |= linear ? FLAG_INTERP_LINEAR_SAMPLE
3928 : FLAG_INTERP_PERSP_SAMPLE;
3929 } else {
3930 /* Optimizing at_offset and at_sample would be possible but
3931 * maybe not worth it if they are not convergent. Convergent
3932 * inputs can trivially switch the barycentric coordinates
3933 * to different ones or flat.
3934 */
3935 continue;
3936 }
3937 break;
3938 }
3939 case nir_intrinsic_load_input:
3940 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
3941 if (BITSET_TEST(linkage->convergent32_mask, i) ||
3942 BITSET_TEST(linkage->convergent16_mask, i))
3943 load->instr.pass_flags |= FLAG_INTERP_CONVERGENT;
3944 else
3945 load->instr.pass_flags |= FLAG_INTERP_FLAT;
3946 } else if (linkage->consumer_stage == MESA_SHADER_TESS_EVAL) {
3947 assert(vec4_slot(i) >= VARYING_SLOT_PATCH0 &&
3948 vec4_slot(i) <= VARYING_SLOT_PATCH31);
3949 /* Patch inputs are always convergent. */
3950 load->instr.pass_flags |= FLAG_INTERP_CONVERGENT;
3951 } else {
3952 /* It's not a fragment shader. We still need to set this. */
3953 load->instr.pass_flags |= FLAG_INTERP_FLAT;
3954 }
3955 break;
3956 case nir_intrinsic_load_per_primitive_input:
3957 case nir_intrinsic_load_input_vertex:
3958 /* Inter-shader code motion is not implemented for these. */
3959 continue;
3960 default:
3961 unreachable("unexpected load intrinsic");
3962 }
3963 }
3964
3965 load_def->parent_instr->pass_flags |= FLAG_MOVABLE;
3966
3967 /* Disallow transform feedback. The load is "movable" for the purpose of
3968 * finding a movable post-dominator; we just can't rewrite the store
3969 * because we need to keep it for xfb, so the post-dominator search
3970 * will have to start from a different load (only that varying will have
3971 * its value rewritten).
3972 */
3973 if (BITSET_TEST(linkage->xfb_mask, i))
3974 continue;
3975
3976 assert(num_movable_loads < ARRAY_SIZE(movable_loads));
3977 movable_loads[num_movable_loads].def = load_def;
3978 movable_loads[num_movable_loads].first_load = load;
3979 num_movable_loads++;
3980 }
3981
3982 if (!num_movable_loads)
3983 return false;
3984
3985 /* Inter-shader code motion turns ALU results into outputs, but not all
3986 * bit sizes are supported by outputs.
3987 *
3988 * The 1-bit type is allowed because the pass always promotes 1-bit
3989 * outputs to 16 or 32 bits, whichever is supported.
3990 *
3991 * TODO: We could support replacing 2 32-bit inputs with one 64-bit
3992 * post-dominator by supporting 64 bits here, but the likelihood of that
3993 * occurring seems low.
3994 */
3995 unsigned supported_io_types = 32 | 1;
3996
3997 if (linkage->producer_builder.shader->options->io_options &
3998 linkage->consumer_builder.shader->options->io_options &
3999 nir_io_16bit_input_output_support)
4000 supported_io_types |= 16;
4001
4002 struct nir_use_dominance_state *postdom_state =
4003 nir_calc_use_dominance_impl(linkage->consumer_builder.impl, true);
4004
4005 for (unsigned i = 0; i < num_movable_loads; i++) {
4006 nir_def *load_def = movable_loads[i].def;
4007 nir_instr *iter = load_def->parent_instr;
4008 nir_instr *movable_postdom = NULL;
4009
4010 /* Find the farthest post-dominator that is movable. */
4011 while (iter) {
4012 iter = nir_get_immediate_use_dominator(postdom_state, iter);
4013 if (iter) {
4014 if (NEED_UPDATE_MOVABLE_FLAGS(iter))
4015 update_movable_flags(linkage, iter);
4016
4017 if (iter->pass_flags & FLAG_UNMOVABLE)
4018 break;
4019
4020 /* We can't move derefs into the previous shader, but we can move
4021 * instructions that use derefs.
4022 */
4023 if (iter->type == nir_instr_type_deref)
4024 continue;
4025
4026 unsigned bit_size;
4027
4028 if (iter->type == nir_instr_type_alu) {
4029 nir_alu_instr *alu = nir_instr_as_alu(iter);
4030
4031 /* Skip comparison opcodes that directly source the first load
4032 * and a constant because any 1-bit values would have to be
4033 * converted to 32 bits in the producer and then converted back
4034 * to 1 bit using nir_op_ine in the consumer, achieving nothing.
4035 */
4036 if (alu->def.bit_size == 1 &&
4037 ((nir_op_infos[alu->op].num_inputs == 1 &&
4038 alu->src[0].src.ssa == load_def) ||
4039 (nir_op_infos[alu->op].num_inputs == 2 &&
4040 ((alu->src[0].src.ssa == load_def &&
4041 alu->src[1].src.ssa->parent_instr->type ==
4042 nir_instr_type_load_const) ||
4043 (alu->src[0].src.ssa->parent_instr->type ==
4044 nir_instr_type_load_const &&
4045 alu->src[1].src.ssa == load_def)))))
4046 continue;
4047
4048 bit_size = alu->def.bit_size;
4049 } else if (iter->type == nir_instr_type_intrinsic) {
4050 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(iter);
4051
4052 /* This is a uniform load with a non-constant index because
4053 * only a non-constant index can be post-dominated by a load.
4054 */
4055 assert(intr->intrinsic == nir_intrinsic_load_deref);
4056
4057 /* Uniform loads must be scalar if their result is immediately
4058 * stored into an output because this pass only works with
4059 * scalar outputs.
4060 */
4061 if (intr->num_components > 1)
4062 continue;
4063
4064 bit_size = intr->def.bit_size;
4065 } else {
4066 unreachable("unexpected instr type");
4067 }
4068
4069 /* Skip unsupported bit sizes and keep searching. */
4070 if (!(bit_size & supported_io_types))
4071 continue;
4072
4073 movable_postdom = iter;
4074 }
4075 }
4076
4077 /* Add the post-dominator to the list unless it's been added already. */
4078 if (movable_postdom &&
4079 !(movable_postdom->pass_flags & FLAG_POST_DOMINATOR_PROCESSED)) {
4080 if (try_move_postdominator(linkage, postdom_state, movable_postdom,
4081 load_def, movable_loads[i].first_load,
4082 progress)) {
4083 /* Moving only one postdominator can change the IR enough that
4084 * we should start from scratch.
4085 */
4086 ralloc_free(postdom_state);
4087 return true;
4088 }
4089
4090 movable_postdom->pass_flags |= FLAG_POST_DOMINATOR_PROCESSED;
4091 }
4092 }
4093
4094 ralloc_free(postdom_state);
4095 return false;
4096 }
4097
4098 /******************************************************************
4099 * COMPACTION
4100 ******************************************************************/
4101
4102 /* Relocate a slot to a new index. Used by compaction. new_index is
4103 * the component index at 16-bit granularity, so the size of vec4 is 8
4104 * in that representation.
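 *
 * For example (hypothetical index), new_index = 13 maps to vec4 slot
 * 13 / 8 = 1, component (13 % 8) / 2 = 2 (z), high_16bits = 13 % 2 = 1,
 * i.e. the upper 16-bit half of the z channel of the second vec4 slot.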
4105 */
4106 static void
4107 relocate_slot(struct linkage_info *linkage, struct scalar_slot *slot,
4108 unsigned i, unsigned new_index, enum fs_vec4_type fs_vec4_type,
4109 bool convergent, nir_opt_varyings_progress *progress)
4110 {
4111 assert(!list_is_empty(&slot->producer.stores));
4112
4113 list_for_each_entry(struct list_node, iter, &slot->producer.stores, head) {
4114 assert(!nir_intrinsic_io_semantics(iter->instr).no_varying ||
4115 has_xfb(iter->instr) ||
4116 linkage->producer_stage == MESA_SHADER_TESS_CTRL);
4117 assert(!is_active_sysval_output(linkage, i, iter->instr));
4118 }
4119
4120 /* Relocate the slot in all loads and stores. */
4121 struct list_head *instruction_lists[3] = {
4122 &slot->producer.stores,
4123 &slot->producer.loads,
4124 &slot->consumer.loads,
4125 };
4126
4127 for (unsigned i = 0; i < ARRAY_SIZE(instruction_lists); i++) {
4128 list_for_each_entry(struct list_node, iter, instruction_lists[i], head) {
4129 nir_intrinsic_instr *intr = iter->instr;
4130
4131 gl_varying_slot new_semantic = vec4_slot(new_index);
4132 unsigned new_component = (new_index % 8) / 2;
4133 bool new_high_16bits = new_index % 2;
4134
4135 /* We also need to relocate xfb info because it's always relative
4136 * to component 0. This just moves it into the correct xfb slot.
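          *
          * For example (hypothetical), moving a store from component 2 to
          * component 1 copies the info from io_xfb2.out[0] into
          * io_xfb.out[1] and clears io_xfb2.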
4137 */
4138 if (has_xfb(intr)) {
4139 unsigned old_component = nir_intrinsic_component(intr);
4140 static const nir_io_xfb clear_xfb;
4141 nir_io_xfb xfb;
4142 bool new_is_odd = new_component % 2 == 1;
4143
4144 memset(&xfb, 0, sizeof(xfb));
4145
4146 if (old_component >= 2) {
4147 xfb.out[new_is_odd] = nir_intrinsic_io_xfb2(intr).out[old_component - 2];
4148 nir_intrinsic_set_io_xfb2(intr, clear_xfb);
4149 } else {
4150 xfb.out[new_is_odd] = nir_intrinsic_io_xfb(intr).out[old_component];
4151 nir_intrinsic_set_io_xfb(intr, clear_xfb);
4152 }
4153
4154 if (new_component >= 2)
4155 nir_intrinsic_set_io_xfb2(intr, xfb);
4156 else
4157 nir_intrinsic_set_io_xfb(intr, xfb);
4158 }
4159
4160 nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
4161 unsigned bit_size = nir_intrinsic_infos[intr->intrinsic].has_dest ?
4162 intr->def.bit_size : intr->src[0].ssa->bit_size;
4163
4164 /* Set all types to float to facilitate full IO vectorization.
4165 * This is skipped only if mediump is not lowered to 16 bits.
4166 *
4167 * Set nir_io_mediump_is_32bit if you never lower mediump IO to 16
4168 * bits, which sets nir_io_semantics::mediump_precision = 0 during
4169 * nir_lower_io.
4170 *
4171 * Set nir_shader_compiler_options::lower_mediump_io if you want to
4172 * lower mediump to 16 bits in the GLSL linker before this pass.
4173 */
4174 if (bit_size != 32 || !sem.medium_precision) {
4175 nir_alu_type type = nir_intrinsic_has_src_type(intr) ?
4176 nir_intrinsic_src_type(intr) :
4177 nir_intrinsic_dest_type(intr);
4178 type = nir_alu_type_get_type_size(type) | nir_type_float;
4179
4180 if (nir_intrinsic_has_src_type(intr))
4181 nir_intrinsic_set_src_type(intr, type);
4182 else
4183 nir_intrinsic_set_dest_type(intr, type);
4184 }
4185
4186 /* When relocating a back color store, don't change it to a front
4187 * color as that would be incorrect. Keep it as back color and only
4188 * relocate it between BFC0 and BFC1.
4189 */
4190 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT &&
4191 (sem.location == VARYING_SLOT_BFC0 ||
4192 sem.location == VARYING_SLOT_BFC1)) {
4193 assert(new_semantic == VARYING_SLOT_COL0 ||
4194 new_semantic == VARYING_SLOT_COL1);
4195 new_semantic = VARYING_SLOT_BFC0 +
4196 (new_semantic - VARYING_SLOT_COL0);
4197 }
4198
4199 #if PRINT_RELOCATE_SLOT
4200 assert(bit_size == 16 || bit_size == 32);
4201
4202 fprintf(stderr, "--- relocating: %s.%c%s%s -> %s.%c%s%s FS_VEC4_TYPE_%s\n",
4203 gl_varying_slot_name_for_stage(sem.location, linkage->producer_stage) + 13,
4204 "xyzw"[nir_intrinsic_component(intr) % 4],
4205 (bit_size == 16 && !sem.high_16bits) ? ".lo" : "",
4206 (bit_size == 16 && sem.high_16bits) ? ".hi" : "",
4207 gl_varying_slot_name_for_stage(new_semantic, linkage->producer_stage) + 13,
4208 "xyzw"[new_component % 4],
4209 (bit_size == 16 && !new_high_16bits) ? ".lo" : "",
4210 (bit_size == 16 && new_high_16bits) ? ".hi" : "",
4211 fs_vec4_type_strings[fs_vec4_type]);
4212 #endif /* PRINT_RELOCATE_SLOT */
4213
4214 sem.location = new_semantic;
4215 sem.high_16bits = new_high_16bits;
4216
4217 /* This is never indirectly indexed. Simplify num_slots. */
4218 sem.num_slots = 1;
4219
4220 nir_intrinsic_set_io_semantics(intr, sem);
4221 nir_intrinsic_set_component(intr, new_component);
4222
4223 if (fs_vec4_type == FS_VEC4_TYPE_PER_PRIMITIVE) {
4224 assert(intr->intrinsic == nir_intrinsic_store_per_primitive_output ||
4225 intr->intrinsic == nir_intrinsic_load_per_primitive_output ||
4226 intr->intrinsic == nir_intrinsic_load_per_primitive_input);
4227 } else {
4228 assert(intr->intrinsic != nir_intrinsic_store_per_primitive_output &&
4229 intr->intrinsic != nir_intrinsic_load_per_primitive_output &&
4230 intr->intrinsic != nir_intrinsic_load_per_primitive_input);
4231 }
4232
4233 if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
4234 continue;
4235
4236 /* This path is used when promoting convergent interpolated
4237 * inputs to flat. Replace load_interpolated_input with load_input.
4238 */
4239 if (fs_vec4_type == FS_VEC4_TYPE_FLAT ||
4240 /* Promote all convergent loads to flat if the driver supports it. */
4241 (convergent &&
4242 linkage->can_mix_convergent_flat_with_interpolated)) {
4243 assert(instruction_lists[i] == &slot->consumer.loads);
4244 nir_builder *b = &linkage->consumer_builder;
4245
4246 b->cursor = nir_before_instr(&intr->instr);
4247 nir_def *load =
4248 nir_load_input(b, 1, intr->def.bit_size,
4249 nir_get_io_offset_src(intr)->ssa,
4250 .io_semantics = sem,
4251 .component = new_component,
4252 .dest_type = nir_intrinsic_dest_type(intr));
4253
4254 nir_def_rewrite_uses(&intr->def, load);
4255 iter->instr = nir_instr_as_intrinsic(load->parent_instr);
4256 nir_instr_remove(&intr->instr);
4257 *progress |= nir_progress_consumer;
4258
4259 /* Interpolation converts Infs to NaNs. If we change it to flat,
4260 * we need to convert Infs to NaNs manually in the producer to
4261 * preserve that.
4262 */
4263 if (preserve_nans(linkage->consumer_builder.shader,
4264 load->bit_size)) {
4265 list_for_each_entry(struct list_node, iter,
4266 &slot->producer.stores, head) {
4267 nir_intrinsic_instr *store = iter->instr;
4268
4269 nir_builder *b = &linkage->producer_builder;
4270 b->cursor = nir_before_instr(&store->instr);
4271 nir_def *repl =
4272 build_convert_inf_to_nan(b, store->src[0].ssa);
4273 nir_src_rewrite(&store->src[0], repl);
4274 }
4275 }
4276 continue;
4277 }
4278
4279 /* We are packing convergent inputs with any other interpolated
4280 * inputs in the same vec4, but the interpolation qualifier might not
4281 * be the same between the two. Set the qualifier of the convergent
4282 * input to match the input it's being packed with.
4283 */
4284 if (!linkage->has_flexible_interp && convergent) {
4285 enum fs_vec4_type current_vec4_type =
4286 get_interp_vec4_type(linkage, i, intr);
4287
4288 /* Make the interpolation qualifier match the slot where we are
4289 * moving this input.
4290 */
4291 if (current_vec4_type != fs_vec4_type) {
4292 nir_builder *b = &linkage->consumer_builder;
4293 nir_def *baryc;
4294
4295 b->cursor = nir_before_instr(&intr->instr);
4296
4297 switch (fs_vec4_type) {
4298 case FS_VEC4_TYPE_INTERP_FP32_PERSP_PIXEL:
4299 case FS_VEC4_TYPE_INTERP_FP16_PERSP_PIXEL:
4300 baryc = nir_load_barycentric_pixel(b, 32,
4301 .interp_mode = INTERP_MODE_SMOOTH);
4302 break;
4303 case FS_VEC4_TYPE_INTERP_FP32_PERSP_CENTROID:
4304 case FS_VEC4_TYPE_INTERP_FP16_PERSP_CENTROID:
4305 baryc = nir_load_barycentric_centroid(b, 32,
4306 .interp_mode = INTERP_MODE_SMOOTH);
4307 break;
4308 case FS_VEC4_TYPE_INTERP_FP32_PERSP_SAMPLE:
4309 case FS_VEC4_TYPE_INTERP_FP16_PERSP_SAMPLE:
4310 baryc = nir_load_barycentric_sample(b, 32,
4311 .interp_mode = INTERP_MODE_SMOOTH);
4312 break;
4313 case FS_VEC4_TYPE_INTERP_FP32_LINEAR_PIXEL:
4314 case FS_VEC4_TYPE_INTERP_FP16_LINEAR_PIXEL:
4315 baryc = nir_load_barycentric_pixel(b, 32,
4316 .interp_mode = INTERP_MODE_NOPERSPECTIVE);
4317 break;
4318 case FS_VEC4_TYPE_INTERP_FP32_LINEAR_CENTROID:
4319 case FS_VEC4_TYPE_INTERP_FP16_LINEAR_CENTROID:
4320 baryc = nir_load_barycentric_centroid(b, 32,
4321 .interp_mode = INTERP_MODE_NOPERSPECTIVE);
4322 break;
4323 case FS_VEC4_TYPE_INTERP_FP32_LINEAR_SAMPLE:
4324 case FS_VEC4_TYPE_INTERP_FP16_LINEAR_SAMPLE:
4325 baryc = nir_load_barycentric_sample(b, 32,
4326 .interp_mode = INTERP_MODE_NOPERSPECTIVE);
4327 break;
4328 case FS_VEC4_TYPE_INTERP_COLOR_PIXEL:
4329 baryc = nir_load_barycentric_pixel(b, 32,
4330 .interp_mode = INTERP_MODE_NONE);
4331 break;
4332 case FS_VEC4_TYPE_INTERP_COLOR_CENTROID:
4333 baryc = nir_load_barycentric_centroid(b, 32,
4334 .interp_mode = INTERP_MODE_NONE);
4335 break;
4336 case FS_VEC4_TYPE_INTERP_COLOR_SAMPLE:
4337 baryc = nir_load_barycentric_sample(b, 32,
4338 .interp_mode = INTERP_MODE_NONE);
4339 break;
4340 default:
4341 unreachable("invalid qualifier");
4342 }
4343
4344 nir_src_rewrite(&intr->src[0], baryc);
4345 }
4346 }
4347 }
4348 }
4349 }
4350
4351 /**
4352 * A helper function for compact_varyings(). Assign new slot indices for
4353 * existing slots of a certain vec4 type (FLAT, FP16, or FP32). Skip already-
4354 * assigned scalar slots (determined by assigned_mask) and don't assign to
4355 * vec4 slots that have an incompatible vec4 type (determined by
4356 * assigned_fs_vec4_type). This works with both 32-bit and 16-bit types.
4357 * slot_size is the component size in the units of 16 bits (2 means 32 bits).
4358 *
4359 * The number of slots to assign can optionally be limited by
4360 * max_assigned_slots.
4361 *
4362 * Return how many 16-bit slots are left unused in the last vec4 (up to 8
4363 * slots).
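 *
 * For example (hypothetical), if three 32-bit scalars end up in the last
 * vec4, 8 - 3 * 2 = 2 of its 16-bit slots remain unused; the caller can
 * then fill them with convergent inputs.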
4364 */
4365 static unsigned
4366 fs_assign_slots(struct linkage_info *linkage,
4367 BITSET_WORD *assigned_mask,
4368 uint8_t assigned_fs_vec4_type[NUM_TOTAL_VARYING_SLOTS],
4369 BITSET_WORD *input_mask,
4370 enum fs_vec4_type fs_vec4_type,
4371 unsigned slot_size,
4372 unsigned max_assigned_slots,
4373 bool convergent,
4374 bool assign_colors,
4375 unsigned color_channel_rotate,
4376 nir_opt_varyings_progress *progress)
4377 {
4378 unsigned i, slot_index, max_slot;
4379 unsigned num_assigned_slots = 0;
4380
4381 if (assign_colors) {
4382 slot_index = VARYING_SLOT_COL0 * 8; /* starting slot */
4383 max_slot = VARYING_SLOT_COL1 + 1;
4384 } else {
4385 slot_index = VARYING_SLOT_VAR0 * 8; /* starting slot */
4386 max_slot = VARYING_SLOT_MAX;
4387 }
4388
4389 /* Assign new slot indices for scalar slots. */
4390 BITSET_FOREACH_SET(i, input_mask, NUM_SCALAR_SLOTS) {
4391 if (is_interpolated_color(linkage, i) != assign_colors)
4392 continue;
4393
4394 /* Skip indirectly-indexed scalar slots and slots incompatible
4395 * with the FS vec4 type.
4396 */
4397 while (1) {
4398          /* If the FS vec4 type is incompatible, move to the next vec4. */
4399 if (fs_vec4_type != FS_VEC4_TYPE_NONE &&
4400 assigned_fs_vec4_type[vec4_slot(slot_index)] !=
4401 FS_VEC4_TYPE_NONE &&
4402 assigned_fs_vec4_type[vec4_slot(slot_index)] != fs_vec4_type) {
4403 slot_index = align(slot_index + slot_size, 8); /* move to next vec4 */
4404 continue;
4405 }
4406
4407 /* This slot is already assigned (assigned_mask is set). Move to
4408 * the next one.
4409 */
4410 if (BITSET_TEST(assigned_mask, slot_index)) {
4411 slot_index += slot_size;
4412 continue;
4413 }
4414 break;
4415 }
4416
4417       /* Assign color channels starting at the color_channel_rotate
4418        * component. Cases:
4419 * color_channel_rotate = 0: xyzw
4420 * color_channel_rotate = 1: yzwx
4421 * color_channel_rotate = 2: zwxy
4422 * color_channel_rotate = 3: wxyz
4423 *
4424 * This has no effect on behavior per se, but some drivers merge VARn
4425 * and COLn into one output if each defines different components.
4426 * For example, if we store VAR0.xy and COL0.z, a driver can merge them
4427 * by mapping the same output to 2 different inputs (VAR0 and COL0) if
4428 * color-specific behavior is per component, but it can't merge VAR0.xy
4429 * and COL0.x because they both define x.
4430 */
4431 unsigned new_slot_index = slot_index;
4432 if (assign_colors && color_channel_rotate) {
4433 new_slot_index = (vec4_slot(new_slot_index)) * 8 +
4434 (new_slot_index + color_channel_rotate * 2) % 8;
4435 }
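      /* A worked example with hypothetical values: for color_channel_rotate = 1
       * and a 32-bit scalar slot at COL0.x (slot_index = VARYING_SLOT_COL0 * 8),
       * the remapped index is COL0 * 8 + (0 + 1 * 2) % 8 = COL0 * 8 + 2, i.e.
       * the low half of COL0.y, so assignment proceeds in yzwx order.
       */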
4436
4437 /* Relocate the slot. */
4438 assert(slot_index < max_slot * 8);
4439 relocate_slot(linkage, &linkage->slot[i], i, new_slot_index,
4440 fs_vec4_type, convergent, progress);
4441
4442 for (unsigned i = 0; i < slot_size; ++i)
4443 BITSET_SET(assigned_mask, slot_index + i);
4444
4445 if (assigned_fs_vec4_type)
4446 assigned_fs_vec4_type[vec4_slot(slot_index)] = fs_vec4_type;
4447 slot_index += slot_size; /* move to the next slot */
4448 num_assigned_slots += slot_size;
4449
4450 /* Remove the slot from the input (unassigned) mask. */
4451 BITSET_CLEAR(input_mask, i);
4452
4453 /* The number of slots to assign can optionally be limited. */
4454 assert(num_assigned_slots <= max_assigned_slots);
4455 if (num_assigned_slots == max_assigned_slots)
4456 break;
4457 }
4458
4459 assert(slot_index <= max_slot * 8);
4460
4461 if (!convergent && fs_vec4_type != FS_VEC4_TYPE_NONE) {
4462 /* Count the number of unused 16-bit components. There can be holes
4463 * because indirect inputs are not moved from their original locations.
4464        * The result is used to determine which components should be filled
4465 * with convergent inputs.
4466 */
4467 unsigned unused_slots = 0;
4468
4469 for (unsigned i = assign_colors ? VARYING_SLOT_COL0 : VARYING_SLOT_VAR0;
4470 i < max_slot; i++) {
4471 if (assigned_fs_vec4_type[i] != fs_vec4_type)
4472 continue;
4473
4474 unsigned comp_mask =
4475 BITSET_GET_RANGE_INSIDE_WORD(assigned_mask, i * 8, i * 8 + 7);
4476 assert(comp_mask);
4477 assert(comp_mask <= 0xff);
4478
4479 if (comp_mask == 0xff)
4480 continue;
4481
4482 /* Only count full unused 32-bit slots, so that 2 disjoint unused
4483 * 16-bit slots don't give the misleading impression that there is
4484           * a full unused 32-bit slot.
4485 */
4486 for (unsigned i = 0; i < 4; i++) {
4487 if (!(comp_mask & BITFIELD_RANGE(i * 2, 2)))
4488 unused_slots += 2;
4489 }
4490 }
4491 return unused_slots;
4492 }
4493
4494 return 0;
4495 }
4496
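/* A worked example for the value fs_assign_slots returns (hypothetical masks):
 * if a FLAT vec4 ends up with comp_mask = 0x0f (x and y assigned as 32-bit
 * values), z and w are counted as unused and contribute 4 to unused_slots
 * (two whole 32-bit slots = four 16-bit slots). A mask like 0x55, where only
 * scattered 16-bit halves are free, contributes nothing because no whole
 * 32-bit slot is unused.
 */
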
4497 /**
4498 * This is called once for 32-bit inputs and once for 16-bit inputs.
4499 * It assigns new slot indices to all scalar slots specified in the masks.
4500 *
4501 * \param linkage Linkage info
4502 * \param assigned_mask Which scalar (16-bit) slots are already taken.
4503 * \param assigned_fs_vec4_type Which vec4 slots have an assigned qualifier
4504 * and can only be filled with compatible slots.
4505 * \param interp_mask The list of interp slots to assign locations for.
4506 * \param flat_mask The list of flat slots to assign locations for.
4507 * \param convergent_mask The list of slots that have convergent output
4508 * stores.
4509 * \param sized_interp_type One of FS_VEC4_TYPE_INTERP_{FP32, FP16, COLOR}*.
4510 * \param slot_size 1 for 16 bits, 2 for 32 bits
4511 * \param color_channel_rotate Assign color channels starting with this index,
4512 * e.g. 2 assigns channels in the zwxy order.
4513 * \param assign_colors Whether to assign only color varyings or only
4514 * non-color varyings.
4515 */
4516 static void
4517 fs_assign_slot_groups(struct linkage_info *linkage,
4518 BITSET_WORD *assigned_mask,
4519 uint8_t assigned_fs_vec4_type[NUM_TOTAL_VARYING_SLOTS],
4520 BITSET_WORD *interp_mask,
4521 BITSET_WORD *flat_mask,
4522 BITSET_WORD *convergent_mask,
4523 BITSET_WORD *color_interp_mask,
4524 enum fs_vec4_type sized_interp_type,
4525 unsigned slot_size,
4526 bool assign_colors,
4527 unsigned color_channel_rotate,
4528 nir_opt_varyings_progress *progress)
4529 {
4530 /* Put interpolated slots first. */
4531 unsigned unused_interp_slots =
4532 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4533 interp_mask, sized_interp_type,
4534 slot_size, NUM_SCALAR_SLOTS, false, assign_colors,
4535 color_channel_rotate, progress);
4536
4537 unsigned unused_color_interp_slots = 0;
4538 if (color_interp_mask) {
4539 unused_color_interp_slots =
4540 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4541 color_interp_mask, FS_VEC4_TYPE_INTERP_COLOR,
4542 slot_size, NUM_SCALAR_SLOTS, false, assign_colors,
4543 color_channel_rotate, progress);
4544 }
4545
4546 /* Put flat slots next.
4547 * Note that only flat vec4 slots can have both 32-bit and 16-bit types
4548 * packed in the same vec4. 32-bit flat inputs are packed first, followed
4549 * by 16-bit flat inputs.
4550 */
4551 unsigned unused_flat_slots =
4552 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4553 flat_mask, FS_VEC4_TYPE_FLAT,
4554 slot_size, NUM_SCALAR_SLOTS, false, assign_colors,
4555 color_channel_rotate, progress);
4556
4557 /* Take the inputs with convergent values and assign them as follows.
4558 * Since they can be assigned as both interpolated and flat, we can
4559 * choose. We prefer them to be flat, but if interpolated vec4s have
4560 * unused components, try to fill those before starting a new flat vec4.
4561 *
4562 * First, fill the unused components of flat (if any), then fill
4563 * the unused components of interpolated (if any), and then make
4564 * the remaining convergent inputs flat.
4565 */
4566 if (!linkage->always_interpolate_convergent_fs_inputs &&
4567 unused_flat_slots) {
4568 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4569 convergent_mask, FS_VEC4_TYPE_FLAT,
4570 slot_size, unused_flat_slots, true, assign_colors,
4571 color_channel_rotate, progress);
4572 }
4573 if (unused_interp_slots) {
4574 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4575 convergent_mask, sized_interp_type,
4576 slot_size, unused_interp_slots, true, assign_colors,
4577 color_channel_rotate, progress);
4578 }
4579 if (unused_color_interp_slots) {
4580 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4581 convergent_mask, FS_VEC4_TYPE_INTERP_COLOR,
4582 slot_size, unused_color_interp_slots, true, assign_colors,
4583 color_channel_rotate, progress);
4584 }
4585 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4586 convergent_mask,
4587 linkage->always_interpolate_convergent_fs_inputs ?
4588 (slot_size == 2 ? FS_VEC4_TYPE_INTERP_FP32 :
4589 FS_VEC4_TYPE_INTERP_FP16) :
4590 FS_VEC4_TYPE_FLAT,
4591 slot_size, NUM_SCALAR_SLOTS, true, assign_colors,
4592 color_channel_rotate, progress);
4593 }
4594
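/* A hypothetical example of the layout fs_assign_slot_groups produces with
 * has_flexible_interp: given 5 interpolated, 3 flat, and 4 convergent 32-bit
 * inputs (and always_interpolate_convergent_fs_inputs disabled), the calls
 * above would roughly yield:
 *
 *    VAR0.xyzw = interpolated
 *    VAR1.x    = interpolated   VAR1.yzw = convergent (interpolated)
 *    VAR2.xyz  = flat           VAR2.w   = convergent (flat)
 *
 * i.e. convergent inputs first fill unused components of flat vec4s, then
 * unused components of interpolated vec4s, and only a remainder would start
 * a new flat vec4.
 */
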
4595 /**
4596 * Same as fs_assign_slot_groups, but don't mix different interpolation
4597 * qualifiers in the same vec4.
4598 */
4599 static void
4600 fs_assign_slot_groups_separate_qual(struct linkage_info *linkage,
4601 BITSET_WORD *assigned_mask,
4602 uint8_t assigned_fs_vec4_type[NUM_TOTAL_VARYING_SLOTS],
4603 INTERP_QUAL_BITSET *interp_masks,
4604 BITSET_WORD *flat_mask,
4605 BITSET_WORD *convergent_mask,
4606 COLOR_QUAL_BITSET *color_interp_masks,
4607 enum fs_vec4_type sized_interp_type_base,
4608 unsigned slot_size,
4609 bool assign_colors,
4610 unsigned color_channel_rotate,
4611 nir_opt_varyings_progress *progress)
4612 {
4613 unsigned unused_interp_slots[NUM_INTERP_QUALIFIERS] = {0};
4614 unsigned unused_color_slots[NUM_COLOR_QUALIFIERS] = {0};
4615
4616 /* Put interpolated slots first. */
4617 for (unsigned i = 0; i < NUM_INTERP_QUALIFIERS; i++) {
4618 unused_interp_slots[i] =
4619 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4620 (*interp_masks)[i], sized_interp_type_base + i,
4621 slot_size, NUM_SCALAR_SLOTS, false, assign_colors,
4622 color_channel_rotate, progress);
4623 }
4624
4625 if (color_interp_masks) {
4626 for (unsigned i = 0; i < NUM_COLOR_QUALIFIERS; i++) {
4627 unused_color_slots[i] =
4628 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4629 (*color_interp_masks)[i],
4630 FS_VEC4_TYPE_INTERP_COLOR_PIXEL + i,
4631 slot_size, NUM_SCALAR_SLOTS, false, assign_colors,
4632 color_channel_rotate, progress);
4633 }
4634 }
4635
4636 /* Put flat slots next.
4637 * Note that only flat vec4 slots can have both 32-bit and 16-bit types
4638 * packed in the same vec4. 32-bit flat inputs are packed first, followed
4639 * by 16-bit flat inputs.
4640 */
4641 unsigned unused_flat_slots =
4642 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4643 flat_mask, FS_VEC4_TYPE_FLAT,
4644 slot_size, NUM_SCALAR_SLOTS, false, assign_colors,
4645 color_channel_rotate, progress);
4646
4647 /* Take the inputs with convergent values and assign them as follows.
4648 * Since they can be assigned as both interpolated and flat, we can
4649 * choose. We prefer them to be flat, but if interpolated vec4s have
4650 * unused components, try to fill those before starting a new flat vec4.
4651 *
4652 * First, fill the unused components of flat (if any) with convergent
4653 * inputs.
4654 */
4655 if (!linkage->always_interpolate_convergent_fs_inputs &&
4656 unused_flat_slots) {
4657 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4658 convergent_mask, FS_VEC4_TYPE_FLAT,
4659 slot_size, unused_flat_slots, true, assign_colors,
4660 color_channel_rotate, progress);
4661 }
4662
4663 /* Then fill the unused components of interpolated slots (if any) with
4664 * convergent inputs.
4665 */
4666 for (unsigned i = 0; i < NUM_INTERP_QUALIFIERS; i++) {
4667 if (unused_interp_slots[i]) {
4668 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4669 convergent_mask, sized_interp_type_base + i,
4670 slot_size, unused_interp_slots[i], true,
4671 assign_colors, color_channel_rotate, progress);
4672 }
4673 }
4674
4675 for (unsigned i = 0; i < NUM_COLOR_QUALIFIERS; i++) {
4676 if (unused_color_slots[i]) {
4677 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4678 convergent_mask, FS_VEC4_TYPE_INTERP_COLOR_PIXEL + i,
4679 slot_size, unused_color_slots[i], true, assign_colors,
4680 color_channel_rotate, progress);
4681 }
4682 }
4683
4684 /* Then make the remaining convergent inputs flat. */
4685 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4686 convergent_mask,
4687 linkage->always_interpolate_convergent_fs_inputs ?
4688 (slot_size == 2 ? FS_VEC4_TYPE_INTERP_FP32_LINEAR_PIXEL :
4689 FS_VEC4_TYPE_INTERP_FP16_LINEAR_PIXEL) :
4690 FS_VEC4_TYPE_FLAT,
4691 slot_size, NUM_SCALAR_SLOTS, true, assign_colors,
4692 color_channel_rotate, progress);
4693 }
4694
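/* Unlike fs_assign_slot_groups, fs_assign_slot_groups_separate_qual never
 * mixes two different qualifiers (e.g. persp/pixel and persp/centroid) in the
 * same vec4 even if both leave components unused. Only convergent inputs can
 * fill holes of any qualifier, because their barycentric source is rewritten
 * to match the destination vec4.
 */
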
4695 static void
4696 vs_tcs_tes_gs_assign_slots(struct linkage_info *linkage,
4697 BITSET_WORD *input_mask,
4698 unsigned *slot_index,
4699 unsigned *patch_slot_index,
4700 unsigned slot_size,
4701 nir_opt_varyings_progress *progress)
4702 {
4703 unsigned i;
4704
4705 BITSET_FOREACH_SET(i, input_mask, NUM_SCALAR_SLOTS) {
4706 if (i >= VARYING_SLOT_PATCH0 * 8 && i < VARYING_SLOT_TESS_MAX * 8) {
4707 /* Skip indirectly-indexed scalar slots at 32-bit granularity.
4708        * We have to do it at this granularity because only the low 16-bit
4709        * slot is set in the mask for 32-bit inputs, not the high 16-bit slot.
4710 */
4711 while (BITSET_TEST32(linkage->indirect_mask, *patch_slot_index))
4712 *patch_slot_index = align(*patch_slot_index + 1, 2);
4713
4714 assert(*patch_slot_index < VARYING_SLOT_TESS_MAX * 8);
4715 relocate_slot(linkage, &linkage->slot[i], i, *patch_slot_index,
4716 FS_VEC4_TYPE_NONE, false, progress);
4717 *patch_slot_index += slot_size; /* increment by 16 or 32 bits */
4718 } else {
4719 /* If the driver wants to use POS and we've already used it, move
4720 * to VARn.
4721 */
4722 if (*slot_index < VARYING_SLOT_VAR0 &&
4723 *slot_index >= VARYING_SLOT_POS + 8)
4724 *slot_index = VARYING_SLOT_VAR0 * 8;
4725
4726 /* Skip indirectly-indexed scalar slots at 32-bit granularity. */
4727 while (BITSET_TEST32(linkage->indirect_mask, *slot_index))
4728 *slot_index = align(*slot_index + 1, 2);
4729
4730 assert(*slot_index < VARYING_SLOT_MAX * 8);
4731 relocate_slot(linkage, &linkage->slot[i], i, *slot_index,
4732 FS_VEC4_TYPE_NONE, false, progress);
4733 *slot_index += slot_size; /* increment by 16 or 32 bits */
4734 }
4735 }
4736 }
4737
4738 static void
4739 vs_tcs_tes_gs_assign_slots_2sets(struct linkage_info *linkage,
4740 BITSET_WORD *input32_mask,
4741 BITSET_WORD *input16_mask,
4742 unsigned *slot_index,
4743 unsigned *patch_slot_index,
4744 nir_opt_varyings_progress *progress)
4745 {
4746 /* Compact 32-bit inputs, followed by 16-bit inputs allowing them to
4747 * share vec4 slots with 32-bit inputs.
4748 */
4749 vs_tcs_tes_gs_assign_slots(linkage, input32_mask, slot_index,
4750 patch_slot_index, 2, progress);
4751 vs_tcs_tes_gs_assign_slots(linkage, input16_mask, slot_index,
4752 patch_slot_index, 1, progress);
4753
4754 assert(*slot_index <= VARYING_SLOT_MAX * 8);
4755 assert(!patch_slot_index || *patch_slot_index <= VARYING_SLOT_TESS_MAX * 8);
4756 }
4757
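/* A hypothetical example for vs_tcs_tes_gs_assign_slots_2sets: with the slot
 * index starting at POS, three 32-bit inputs and two 16-bit inputs are packed
 * as POS.x, POS.y, POS.z and the two 16-bit halves of POS.w, so both sets can
 * share a single vec4.
 */
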
4758 /**
4759 * Compaction means scalarizing and then packing scalar components into full
4760 * vec4s, so that we minimize the number of unused components in vec4 slots.
4761 *
4762 * Compaction is as simple as moving a scalar input from one scalar slot
4763 * to another. Indirectly-indexed slots are not touched, so the compaction
4764 * has to compact around them. Unused 32-bit components of indirectly-indexed
4765 * slots are still filled, so no space is wasted there, but if indirectly-
4766 * indexed 16-bit components have the other 16-bit half unused, that half is
4767 * wasted.
4768 */
4769 static void
4770 compact_varyings(struct linkage_info *linkage,
4771 nir_opt_varyings_progress *progress)
4772 {
4773 if (linkage->consumer_stage == MESA_SHADER_FRAGMENT) {
4774 /* These arrays are used to track which scalar slots we've already
4775 * assigned. We can fill unused components of indirectly-indexed slots,
4776 * but only if the vec4 slot type (FLAT, FP16, or FP32) is the same.
4777      * assigned_fs_vec4_type tracks the vec4 type of each slot, while
4778      * assigned_mask tracks which scalar slots are already taken.
4779 */
4780 uint8_t assigned_fs_vec4_type[NUM_TOTAL_VARYING_SLOTS] = {0};
4781 BITSET_DECLARE(assigned_mask, NUM_SCALAR_SLOTS);
4782 BITSET_ZERO(assigned_mask);
4783
4784 /* Iterate over all indirectly accessed inputs and set the assigned vec4
4785 * type of each occupied slot to the vec4 type of indirect inputs, so
4786 * that compaction doesn't put inputs of a different vec4 type in
4787 * the same vec4.
4788 *
4789 * We don't try to compact indirect input arrays, though we could.
4790 */
4791 unsigned i;
4792 BITSET_FOREACH_SET(i, linkage->indirect_mask, NUM_SCALAR_SLOTS) {
4793 struct scalar_slot *slot = &linkage->slot[i];
4794
4795 /* The slot of the first array element contains all loads for all
4796 * elements, including all direct accesses, while all other array
4797 * elements are empty (on purpose).
4798 */
4799 if (list_is_empty(&linkage->slot[i].consumer.loads))
4800 continue;
4801
4802 assert(slot->num_slots >= 2);
4803
4804 for (unsigned array_index = 0; array_index < slot->num_slots;
4805 array_index++) {
4806 unsigned vec4_index = vec4_slot(i) + array_index;
4807 unsigned scalar_index = i + array_index * 8;
4808 assigned_fs_vec4_type[vec4_index] = linkage->fs_vec4_type[vec4_index];
4809 /* Indirectly-indexed slots are marked to always occupy 32 bits
4810 * (2 16-bit slots), though we waste the high 16 bits if they are unused.
4811 */
4812 BITSET_SET_RANGE_INSIDE_WORD(assigned_mask, scalar_index, scalar_index + 1);
4813 }
4814 }
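      /* For example (hypothetical slots): a 3-element indirectly-indexed FP32
       * array whose first element lives in VAR5.x reserves the full 32 bits of
       * VAR5.x, VAR6.x, and VAR7.x in assigned_mask and marks VAR5..VAR7 with
       * the array's vec4 type, so compaction only packs compatible inputs
       * around it.
       */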
4815
4816 if (linkage->has_flexible_interp) {
4817 /* This codepath packs convergent varyings with both interpolated and
4818 * flat, whichever has free space.
4819 */
4820 fs_assign_slot_groups(linkage, assigned_mask, assigned_fs_vec4_type,
4821 linkage->interp_fp32_mask, linkage->flat32_mask,
4822 linkage->convergent32_mask, NULL,
4823 FS_VEC4_TYPE_INTERP_FP32, 2, false, 0, progress);
4824
4825 /* Now do the same thing, but for 16-bit inputs. */
4826 fs_assign_slot_groups(linkage, assigned_mask, assigned_fs_vec4_type,
4827 linkage->interp_fp16_mask, linkage->flat16_mask,
4828 linkage->convergent16_mask, NULL,
4829 FS_VEC4_TYPE_INTERP_FP16, 1, false, 0, progress);
4830 } else {
4831 /* Basically the same as above. */
4832 fs_assign_slot_groups_separate_qual(
4833 linkage, assigned_mask, assigned_fs_vec4_type,
4834 &linkage->interp_fp32_qual_masks, linkage->flat32_mask,
4835 linkage->convergent32_mask, NULL,
4836 FS_VEC4_TYPE_INTERP_FP32_PERSP_PIXEL, 2, false, 0, progress);
4837
4838 fs_assign_slot_groups_separate_qual(
4839 linkage, assigned_mask, assigned_fs_vec4_type,
4840 &linkage->interp_fp16_qual_masks, linkage->flat16_mask,
4841 linkage->convergent16_mask, NULL,
4842 FS_VEC4_TYPE_INTERP_FP16_PERSP_PIXEL, 1, false, 0, progress);
4843 }
4844
4845 /* Assign INTERP_MODE_EXPLICIT. Both FP32 and FP16 can occupy the same
4846 * slot because the vertex data is passed to FS as-is.
4847 */
4848 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4849 linkage->interp_explicit32_mask, FS_VEC4_TYPE_INTERP_EXPLICIT,
4850 2, NUM_SCALAR_SLOTS, false, false, 0, progress);
4851
4852 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4853 linkage->interp_explicit16_mask, FS_VEC4_TYPE_INTERP_EXPLICIT,
4854 1, NUM_SCALAR_SLOTS, false, false, 0, progress);
4855
4856 /* Same for strict vertex ordering. */
4857 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4858 linkage->interp_explicit_strict32_mask, FS_VEC4_TYPE_INTERP_EXPLICIT_STRICT,
4859 2, NUM_SCALAR_SLOTS, false, false, 0, progress);
4860
4861 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4862 linkage->interp_explicit_strict16_mask, FS_VEC4_TYPE_INTERP_EXPLICIT_STRICT,
4863 1, NUM_SCALAR_SLOTS, false, false, 0, progress);
4864
4865 /* Same for per-primitive. */
4866 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4867 linkage->per_primitive32_mask, FS_VEC4_TYPE_PER_PRIMITIVE,
4868 2, NUM_SCALAR_SLOTS, false, false, 0, progress);
4869
4870 fs_assign_slots(linkage, assigned_mask, assigned_fs_vec4_type,
4871 linkage->per_primitive16_mask, FS_VEC4_TYPE_PER_PRIMITIVE,
4872 1, NUM_SCALAR_SLOTS, false, false, 0, progress);
4873
4874 /* Put transform-feedback-only outputs last. */
4875 fs_assign_slots(linkage, assigned_mask, NULL,
4876 linkage->xfb32_only_mask, FS_VEC4_TYPE_NONE, 2,
4877 NUM_SCALAR_SLOTS, false, false, 0, progress);
4878
4879 fs_assign_slots(linkage, assigned_mask, NULL,
4880 linkage->xfb16_only_mask, FS_VEC4_TYPE_NONE, 1,
4881 NUM_SCALAR_SLOTS, false, false, 0, progress);
4882
4883 /* Color varyings are only compacted among themselves. */
4884     /* Determine whether the shader contains any color varyings. */
4885 unsigned col0 = VARYING_SLOT_COL0 * 8;
4886 bool has_colors =
4887 !BITSET_TEST_RANGE_INSIDE_WORD(linkage->interp_fp32_mask, col0,
4888 col0 + 15, 0) ||
4889 !BITSET_TEST_RANGE_INSIDE_WORD(linkage->convergent32_mask, col0,
4890 col0 + 15, 0) ||
4891 !BITSET_TEST_RANGE_INSIDE_WORD(linkage->color32_mask, col0,
4892 col0 + 15, 0) ||
4893 !BITSET_TEST_RANGE_INSIDE_WORD(linkage->flat32_mask, col0,
4894 col0 + 15, 0) ||
4895 !BITSET_TEST_RANGE_INSIDE_WORD(linkage->xfb32_only_mask, col0,
4896 col0 + 15, 0);
4897
4898 for (unsigned i = 0; i < NUM_INTERP_QUALIFIERS; i++) {
4899 has_colors |=
4900 !BITSET_TEST_RANGE_INSIDE_WORD(linkage->interp_fp32_qual_masks[i],
4901 col0, col0 + 15, 0);
4902 }
4903 for (unsigned i = 0; i < NUM_COLOR_QUALIFIERS; i++) {
4904 has_colors |=
4905 !BITSET_TEST_RANGE_INSIDE_WORD(linkage->color32_qual_masks[i],
4906 col0, col0 + 15, 0);
4907 }
4908
4909 if (has_colors) {
4910 unsigned color_channel_rotate = 0;
4911
4912 if (linkage->consumer_builder.shader->options->io_options &
4913 nir_io_compaction_rotates_color_channels) {
4914 color_channel_rotate =
4915 DIV_ROUND_UP(BITSET_LAST_BIT(assigned_mask), 2) % 4;
4916 }
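         /* A worked example with a hypothetical mask: if the last assigned
          * 16-bit slot fully uses the first 32-bit component of a vec4,
          * BITSET_LAST_BIT returns 8 * n + 2, DIV_ROUND_UP(8 * n + 2, 2) is
          * 4 * n + 1, and (4 * n + 1) % 4 = 1, so color channels start at y.
          * That aligns COLn with the first free component, which can let
          * a driver merge it with the preceding VARn vec4.
          */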
4917
4918 if (linkage->has_flexible_interp) {
4919 fs_assign_slot_groups(linkage, assigned_mask, assigned_fs_vec4_type,
4920 linkage->interp_fp32_mask, linkage->flat32_mask,
4921 linkage->convergent32_mask, linkage->color32_mask,
4922 FS_VEC4_TYPE_INTERP_FP32, 2, true,
4923 color_channel_rotate, progress);
4924 } else {
4925 fs_assign_slot_groups_separate_qual(
4926 linkage, assigned_mask, assigned_fs_vec4_type,
4927 &linkage->interp_fp32_qual_masks, linkage->flat32_mask,
4928 linkage->convergent32_mask, &linkage->color32_qual_masks,
4929 FS_VEC4_TYPE_INTERP_FP32_PERSP_PIXEL, 2, true,
4930 color_channel_rotate, progress);
4931 }
4932
4933 /* Put transform-feedback-only outputs last. */
4934 fs_assign_slots(linkage, assigned_mask, NULL,
4935 linkage->xfb32_only_mask, FS_VEC4_TYPE_NONE, 2,
4936 NUM_SCALAR_SLOTS, false, true, color_channel_rotate,
4937 progress);
4938 }
4939 return;
4940 }
4941
4942 /* If we get here, the consumer can only be TCS, TES, or GS.
4943 *
4944 * "use_pos" says whether the driver prefers that compaction with non-FS
4945 * consumers puts varyings into POS first before using any VARn.
4946 */
4947 bool use_pos = !(linkage->producer_builder.shader->options->io_options &
4948 nir_io_dont_use_pos_for_non_fs_varyings);
4949 unsigned slot_index = (use_pos ? VARYING_SLOT_POS
4950 : VARYING_SLOT_VAR0) * 8;
4951
4952 if (linkage->consumer_stage == MESA_SHADER_TESS_CTRL) {
4953 /* Make tcs_cross_invoc*_mask bits disjoint with flat*_mask bits
4954 * because tcs_cross_invoc*_mask is initially a subset of flat*_mask,
4955 * but we must assign each scalar slot only once.
4956 */
4957 BITSET_ANDNOT(linkage->flat32_mask, linkage->flat32_mask,
4958 linkage->tcs_cross_invoc32_mask);
4959 BITSET_ANDNOT(linkage->flat16_mask, linkage->flat16_mask,
4960 linkage->tcs_cross_invoc16_mask);
4961
4962 /* Put cross-invocation-accessed TCS inputs first. */
4963 vs_tcs_tes_gs_assign_slots_2sets(linkage, linkage->tcs_cross_invoc32_mask,
4964 linkage->tcs_cross_invoc16_mask,
4965 &slot_index, NULL, progress);
4966 /* Remaining TCS inputs. */
4967 vs_tcs_tes_gs_assign_slots_2sets(linkage, linkage->flat32_mask,
4968 linkage->flat16_mask, &slot_index,
4969 NULL, progress);
4970 return;
4971 }
4972
4973 if (linkage->consumer_stage == MESA_SHADER_TESS_EVAL) {
4974 unsigned patch_slot_index = VARYING_SLOT_PATCH0 * 8;
4975
4976 vs_tcs_tes_gs_assign_slots_2sets(linkage, linkage->flat32_mask,
4977 linkage->flat16_mask, &slot_index,
4978 &patch_slot_index, progress);
4979
4980 /* Put no-varying slots last. These are TCS outputs read by TCS but
4981 * not TES.
4982 */
4983 vs_tcs_tes_gs_assign_slots_2sets(linkage, linkage->no_varying32_mask,
4984 linkage->no_varying16_mask, &slot_index,
4985 &patch_slot_index, progress);
4986 return;
4987 }
4988
4989 assert(linkage->consumer_stage == MESA_SHADER_GEOMETRY);
4990 vs_tcs_tes_gs_assign_slots_2sets(linkage, linkage->flat32_mask,
4991 linkage->flat16_mask, &slot_index,
4992 NULL, progress);
4993 }
4994
4995 /******************************************************************
4996 * PUTTING IT ALL TOGETHER
4997 ******************************************************************/
4998
4999 /* A costing function that estimates the cost of a uniform expression to decide
5000  * whether it's worth propagating from output stores to the next shader stage.
5001 * This tries to model instruction cost of a scalar desktop GPU.
5002 *
5003 * It's used by uniform expression propagation when drivers provide a cost
5004 * limit for such an optimization but don't provide their own costing function,
5005  * which is the case for the majority of drivers.
5006 */
5007 static unsigned
5008 default_varying_estimate_instr_cost(nir_instr *instr)
5009 {
5010 unsigned dst_bit_size, src_bit_size, num_dst_dwords;
5011 nir_op alu_op;
5012
5013 switch (instr->type) {
5014 case nir_instr_type_alu:
5015 dst_bit_size = nir_instr_as_alu(instr)->def.bit_size;
5016 src_bit_size = nir_instr_as_alu(instr)->src[0].src.ssa->bit_size;
5017 alu_op = nir_instr_as_alu(instr)->op;
5018 num_dst_dwords = DIV_ROUND_UP(dst_bit_size, 32);
5019
5020 switch (alu_op) {
5021 /* Moves are free. */
5022 case nir_op_mov:
5023 case nir_op_vec2:
5024 case nir_op_vec3:
5025 case nir_op_vec4:
5026 case nir_op_vec5:
5027 case nir_op_vec8:
5028 case nir_op_vec16:
5029 /* These are usually folded into FP instructions as src or dst
5030 * modifiers.
5031 */
5032 case nir_op_fabs:
5033 case nir_op_fneg:
5034 case nir_op_fsat:
5035 return 0;
5036
5037 /* 16-bit multiplication should be cheap. Greater sizes not so much. */
5038 case nir_op_imul:
5039 case nir_op_umul_low:
5040 case nir_op_imul_2x32_64:
5041 case nir_op_umul_2x32_64:
5042 return dst_bit_size <= 16 ? 1 : 4 * num_dst_dwords;
5043
5044 /* High bits of 64-bit multiplications. */
5045 case nir_op_imul_high:
5046 case nir_op_umul_high:
5047 /* Lowered into multiple instructions typically. */
5048 case nir_op_fsign:
5049 return 4;
5050
5051 /* Transcendental opcodes typically run at 1/4 rate of FMA. */
5052 case nir_op_fexp2:
5053 case nir_op_flog2:
5054 case nir_op_frcp:
5055 case nir_op_frsq:
5056 case nir_op_fsqrt:
5057 case nir_op_fsin:
5058 case nir_op_fcos:
5059 case nir_op_fsin_amd:
5060 case nir_op_fcos_amd:
5061 /* FP64 is usually much slower. */
5062 return dst_bit_size == 64 ? 32 : 4;
5063
5064 case nir_op_fpow:
5065 return 4 + 1 + 4; /* log2 + mul + exp2 */
5066
5067 /* Integer division is slow. */
5068 case nir_op_idiv:
5069 case nir_op_udiv:
5070 case nir_op_imod:
5071 case nir_op_umod:
5072 case nir_op_irem:
5073 return dst_bit_size == 64 ? 80 : 40;
5074
5075 case nir_op_fdiv:
5076 return dst_bit_size == 64 ? 80 : 5; /* FP16 & FP32: rcp + mul */
5077
5078 case nir_op_fmod:
5079 case nir_op_frem:
5080 return dst_bit_size == 64 ? 80 : 8;
5081
5082 default:
5083 /* FP64 is usually much slower. */
5084 if ((dst_bit_size == 64 &&
5085 nir_op_infos[alu_op].output_type & nir_type_float) ||
5086 (src_bit_size == 64 &&
5087 nir_op_infos[alu_op].input_types[0] & nir_type_float))
5088 return 16;
5089
5090 /* 1 per 32-bit result. */
5091 return DIV_ROUND_UP(MAX2(dst_bit_size, src_bit_size), 32);
5092 }
5093
5094 case nir_instr_type_intrinsic:
5095 dst_bit_size = nir_instr_as_intrinsic(instr)->def.bit_size;
5096 num_dst_dwords = DIV_ROUND_UP(dst_bit_size, 32);
5097
5098 /* This can only be a uniform load. Other intrinsics and variables are
5099 * rejected before this is called.
5100 */
5101 switch (nir_instr_as_intrinsic(instr)->intrinsic) {
5102 case nir_intrinsic_load_deref:
5103 /* Uniform loads can appear fast if latency hiding is effective. */
5104 return 2 * num_dst_dwords;
5105
5106 default:
5107 unreachable("unexpected intrinsic");
5108 }
5109
5110 case nir_instr_type_deref: {
5111 nir_deref_instr *deref = nir_instr_as_deref(instr);
5112
5113 switch (deref->deref_type) {
5114 case nir_deref_type_var:
5115 case nir_deref_type_struct:
5116 return 0;
5117 case nir_deref_type_array:
5118 /* Indexing uniforms with a divergent index has a high cost. This cost
5119 * is likely only going to be accepted by the driver if the next
5120 * shader doesn't run after amplification (e.g. VS->TCS, TES->GS).
5121 */
5122 return nir_src_is_const(deref->arr.index) ? 0 : 128;
5123
5124 default:
5125 unreachable("unexpected deref type");
5126 }
5127 }
5128
5129 default:
5130 unreachable("unexpected instr type");
5131 }
5132 }
5133
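/* A worked example for default_varying_estimate_instr_cost (hypothetical
 * expression): propagating fsqrt(load_deref(uniform)) would be costed as
 * deref_var (0) + load_deref (2 per dword = 2) + fsqrt (4) = 6, so the
 * driver's max_varying_expression_cost budget must cover a cost of 6 for it
 * to be moved to the next stage.
 */
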
5134 static void
5135 init_linkage(nir_shader *producer, nir_shader *consumer, bool spirv,
5136 unsigned max_uniform_components, unsigned max_ubos_per_stage,
5137 struct linkage_info *linkage, nir_opt_varyings_progress *progress)
5138 {
5139 *linkage = (struct linkage_info){
5140 .spirv = spirv,
5141 .can_mix_convergent_flat_with_interpolated =
5142 consumer->info.stage == MESA_SHADER_FRAGMENT &&
5143 consumer->options->io_options &
5144 nir_io_mix_convergent_flat_with_interpolated,
5145 .has_flexible_interp =
5146 consumer->info.stage == MESA_SHADER_FRAGMENT &&
5147 consumer->options->io_options &
5148 nir_io_has_flexible_input_interpolation_except_flat,
5149 .always_interpolate_convergent_fs_inputs =
5150 consumer->info.stage == MESA_SHADER_FRAGMENT &&
5151 consumer->options->io_options &
5152 nir_io_always_interpolate_convergent_fs_inputs,
5153 .producer_stage = producer->info.stage,
5154 .consumer_stage = consumer->info.stage,
5155 .producer_builder =
5156 nir_builder_create(nir_shader_get_entrypoint(producer)),
5157 .consumer_builder =
5158 nir_builder_create(nir_shader_get_entrypoint(consumer)),
5159
5160 .max_varying_expression_cost =
5161 producer->options->varying_expression_max_cost ?
5162 producer->options->varying_expression_max_cost(producer, consumer) :
5163 producer->options->max_varying_expression_cost,
5164 .varying_estimate_instr_cost =
5165 producer->options->varying_estimate_instr_cost ?
5166 producer->options->varying_estimate_instr_cost :
5167 default_varying_estimate_instr_cost,
5168
5169 .linear_mem_ctx = linear_context(ralloc_context(NULL)),
5170 };
5171
5172 for (unsigned i = 0; i < ARRAY_SIZE(linkage->slot); i++) {
5173 list_inithead(&linkage->slot[i].producer.loads);
5174 list_inithead(&linkage->slot[i].producer.stores);
5175 list_inithead(&linkage->slot[i].consumer.loads);
5176 }
5177
5178 /* Preparation. */
5179 nir_shader_intrinsics_pass(consumer, gather_inputs, 0, linkage);
5180 nir_shader_intrinsics_pass(producer, gather_outputs, 0, linkage);
5181 tidy_up_indirect_varyings(linkage);
5182 determine_uniform_movability(linkage, max_uniform_components);
5183 determine_ubo_movability(linkage, max_ubos_per_stage);
5184 /* This must always be done because it also cleans up bitmasks. */
5185 remove_dead_varyings(linkage, progress);
5186 }
5187
5188 static void
5189 free_linkage(struct linkage_info *linkage)
5190 {
5191 ralloc_free(ralloc_parent_of_linear_context(linkage->linear_mem_ctx));
5192 }
5193
5194 static void
5195 print_shader_linkage(nir_shader *producer, nir_shader *consumer)
5196 {
5197 struct linkage_info *linkage = MALLOC_STRUCT(linkage_info);
5198 nir_opt_varyings_progress progress = 0;
5199
5200 init_linkage(producer, consumer, false, 0, 0, linkage, &progress);
5201 print_linkage(linkage);
5202 free_linkage(linkage);
5203 FREE(linkage);
5204 }
5205
5206 /**
5207 * Run lots of optimizations on varyings. See the description at the beginning
5208 * of this file.
5209 */
5210 nir_opt_varyings_progress
5211 nir_opt_varyings(nir_shader *producer, nir_shader *consumer, bool spirv,
5212 unsigned max_uniform_components, unsigned max_ubos_per_stage)
5213 {
5214 /* Task -> Mesh I/O uses payload variables and not varying slots,
5215 * so this pass can't do anything about it.
5216 */
5217 if (producer->info.stage == MESA_SHADER_TASK)
5218 return 0;
5219
5220 nir_opt_varyings_progress progress = 0;
5221 struct linkage_info *linkage = MALLOC_STRUCT(linkage_info);
5222 if (linkage == NULL)
5223 return 0;
5224
5225 /* Producers before a fragment shader must have up-to-date vertex
5226 * divergence information.
5227 */
5228 if (consumer->info.stage == MESA_SHADER_FRAGMENT) {
5229 nir_vertex_divergence_analysis(producer);
5230 }
5231
5232 /* This also removes dead varyings. */
5233 init_linkage(producer, consumer, spirv, max_uniform_components,
5234 max_ubos_per_stage, linkage, &progress);
5235
5236    /* Part 1: Run optimizations that only remove varyings. (They can also
5237     * move instructions between shaders.)
5238 */
5239 propagate_uniform_expressions(linkage, &progress);
5240
5241 /* Part 2: Deduplicate outputs. */
5242 deduplicate_outputs(linkage, &progress);
5243
5244 /* Run CSE on the consumer after output deduplication because duplicated
5245 * loads can prevent finding the post-dominator for inter-shader code
5246 * motion.
5247 */
5248 NIR_PASS(_, consumer, nir_opt_cse);
5249
5250 /* Re-gather linkage info after CSE. */
5251 free_linkage(linkage);
5252 init_linkage(producer, consumer, spirv, max_uniform_components,
5253 max_ubos_per_stage, linkage, &progress);
5254
5255 /* This must be done after deduplication and before inter-shader code
5256 * motion.
5257 */
5258 tidy_up_convergent_varyings(linkage);
5259 find_open_coded_tes_input_interpolation(linkage);
5260
5261 /* Part 3: Run optimizations that completely change varyings. */
5262 #if PRINT
5263 int i = 0;
5264 puts("Before:");
5265 nir_print_shader(linkage->producer_builder.shader, stdout);
5266 nir_print_shader(linkage->consumer_builder.shader, stdout);
5267 print_linkage(linkage);
5268 puts("");
5269 #endif
5270
5271 while (backward_inter_shader_code_motion(linkage, &progress)) {
5272 #if PRINT
5273 i++;
5274 printf("Finished: %i\n", i);
5275 nir_print_shader(linkage->producer_builder.shader, stdout);
5276 nir_print_shader(linkage->consumer_builder.shader, stdout);
5277 print_linkage(linkage);
5278 puts("");
5279 #endif
5280 }
5281
5282 /* Part 4: Do compaction. */
5283 compact_varyings(linkage, &progress);
5284
5285 nir_metadata_preserve(linkage->producer_builder.impl,
5286 progress & nir_progress_producer ?
5287 (nir_metadata_control_flow) :
5288 nir_metadata_all);
5289 nir_metadata_preserve(linkage->consumer_builder.impl,
5290 progress & nir_progress_consumer ?
5291 (nir_metadata_control_flow) :
5292 nir_metadata_all);
5293 free_linkage(linkage);
5294 FREE(linkage);
5295
5296 /* Compaction moves CLIP_DIST and CULL_DIST outputs to VARn if the next
5297 * shader is not FS. Clear those fields in shader_info.
5298 */
5299 if (consumer->info.stage <= MESA_SHADER_GEOMETRY) {
5300 producer->info.clip_distance_array_size = 0;
5301 producer->info.cull_distance_array_size = 0;
5302 }
5303
5304 if (progress & nir_progress_producer)
5305 nir_validate_shader(producer, "nir_opt_varyings");
5306 if (progress & nir_progress_consumer)
5307 nir_validate_shader(consumer, "nir_opt_varyings");
5308
5309 if (consumer->info.stage == MESA_SHADER_FRAGMENT) {
5310 /* We have called nir_vertex_divergence_analysis on the producer here.
5311     * We need to reset the divergent field to true, otherwise it becomes
5312     * stale after later passes run and we can fail divergence assertions
5313     * because a source is divergent while its destination isn't.
5314 */
5315 nir_clear_divergence_info(producer);
5316 }
5317
5318 return progress;
5319 }
5320
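/* A minimal usage sketch (hypothetical driver code; driver_optimize_nir is a
 * placeholder for whatever optimization loop the driver runs after linking):
 *
 *    nir_opt_varyings_progress p =
 *       nir_opt_varyings(vs, fs, false, 16, 4);
 *
 *    if (p & nir_progress_producer)
 *       driver_optimize_nir(vs);
 *    if (p & nir_progress_consumer)
 *       driver_optimize_nir(fs);
 *
 * The uniform and UBO limits (16 and 4 here are arbitrary) are driver-chosen
 * budgets for how much uniform storage the pass may consume when propagating
 * uniform expressions to the next stage.
 */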