1 /*
2 * Copyright 2023 Valve Corporation
3 * SPDX-License-Identifier: MIT
4 */
5
6 #include "compiler/shader_enums.h"
7 #include "agx_nir.h"
8 #include "nir.h"
9 #include "nir_builder.h"
10 #include "nir_builder_opcodes.h"
11 #include "nir_intrinsics.h"
12 #include "nir_intrinsics_indices.h"
13
14 /*
15 * In AGX, the values of fragment shader inputs are represented as coefficient
16 * vectors <A, B, C>, which are dotted with <x, y, 1> to perform interpolation.
17 * x and y are relative to the tile. In other words, A and B are the
18 * screen-space partial derivatives of the input, and C is the value at the
19 * corner of the tile.
20 *
21 * For some interpolation modes, the dot product happens in the iterator
22 * hardware. Other modes are implemented in this file, by lowering to math on
23 * the coefficient vectors.
24 */
25
26 /* XXX: It's not clear what this is for, but seems necessary */
27 static nir_def *
cf_valid(nir_builder * b,nir_def * cf)28 cf_valid(nir_builder *b, nir_def *cf)
29 {
30 nir_def *bit = nir_ieq_imm(b, nir_iand_imm(b, nir_channel(b, cf, 0), 1), 0);
31
32 /* XXX: Apple's compiler actually checks that the significand is nonzero and
33 * the exponent is 0 or 1. This is probably a typo -- it doesn't make any
34 * logical sense. Presumably they just meant to check for denorms, so let's
35 * do that. Either way the tests pass.
36 */
37 nir_def *cf01 = nir_trim_vector(b, cf, 2);
38 return nir_ior(b, bit, nir_fisnormal(b, cf01));
39 }
40
41 static nir_def *
interpolate_at_offset(nir_builder * b,nir_def * cf,nir_def * offset,bool perspective)42 interpolate_at_offset(nir_builder *b, nir_def *cf, nir_def *offset,
43 bool perspective)
44 {
45 /* Get the coordinate of the pixel within the tile */
46 nir_def *pixel_coords = nir_load_pixel_coord(b);
47 nir_def *tile_offs = nir_umod_imm(b, pixel_coords, 32);
48
49 /* Convert to float, getting the center of the pixel */
50 nir_def *center = nir_fadd_imm(b, nir_u2f32(b, tile_offs), 0.5);
51
52 /* Calculate the location to interpolate. offset is defined relative to the
53 * center of the pixel and is a float.
54 */
55 nir_def *pos = nir_fadd(b, center, nir_f2f32(b, offset));
56
57 /* Interpolate with the given coefficients */
58 nir_def *interp = nir_ffma(b, nir_channel(b, pos, 1), nir_channel(b, cf, 1),
59 nir_channel(b, cf, 2));
60
61 interp = nir_ffma(b, nir_channel(b, pos, 0), nir_channel(b, cf, 0), interp);
62
63 /* Divide by RHW. This load will be lowered recursively. */
64 if (perspective) {
65 nir_def *bary = nir_load_barycentric_at_offset(
66 b, 32, offset, .interp_mode = INTERP_MODE_NOPERSPECTIVE);
67
68 nir_def *rhw = nir_load_interpolated_input(
69 b, 1, 32, bary, nir_imm_int(b, 0), .component = 3,
70 .io_semantics = {
71 .location = VARYING_SLOT_POS,
72 .num_slots = 1,
73 });
74
75 interp = nir_fdiv(b, interp, rhw);
76 }
77
78 /* Replace invalid interpolations with the constant channel */
79 return nir_bcsel(b, cf_valid(b, cf), interp, nir_channel(b, cf, 2));
80 }
81
82 static nir_def *
interpolate_flat(nir_builder * b,nir_def * coefficients)83 interpolate_flat(nir_builder *b, nir_def *coefficients)
84 {
85 /* Same value anywhere, so just take the constant (affine) component */
86 return nir_channel(b, coefficients, 2);
87 }
88
89 static enum glsl_interp_mode
interp_mode_for_load(nir_intrinsic_instr * load)90 interp_mode_for_load(nir_intrinsic_instr *load)
91 {
92 if (load->intrinsic == nir_intrinsic_load_input)
93 return INTERP_MODE_FLAT;
94 else
95 return nir_intrinsic_interp_mode(nir_src_as_intrinsic(load->src[0]));
96 }
97
98 static bool
needs_lower(const nir_instr * instr,UNUSED const void * _)99 needs_lower(const nir_instr *instr, UNUSED const void *_)
100 {
101 if (instr->type != nir_instr_type_intrinsic)
102 return false;
103
104 const nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
105
106 /* at_offset barycentrics need to be lowered */
107 if (load->intrinsic == nir_intrinsic_load_interpolated_input) {
108 return (nir_src_as_intrinsic(load->src[0])->intrinsic ==
109 nir_intrinsic_load_barycentric_at_offset);
110 }
111
112 /* Flat shading always lowered */
113 return (load->intrinsic == nir_intrinsic_load_input);
114 }
115
/* Lower one scalar channel of an input load to math on the coefficient
 * vector, returning the interpolated 32-bit (or converted) value.
 */
static nir_def *
interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
{
   nir_io_semantics sem = nir_intrinsic_io_semantics(load);

   /* Indirect varyings not supported, just bias the location */
   sem.location += nir_src_as_uint(*nir_get_io_offset_src(load));
   sem.num_slots = 1;

   /* Fetch the <A, B, C> coefficient vector for this single component */
   nir_def *coefficients = nir_load_coefficients_agx(
      b, .component = nir_intrinsic_component(load) + channel,
      .interp_mode = interp_mode_for_load(load), .io_semantics = sem);

   if (load->intrinsic == nir_intrinsic_load_input) {
      /* Flat loads are only handled at 32-bit here */
      assert(load->def.bit_size == 32);
      return interpolate_flat(b, coefficients);
   } else {
      nir_intrinsic_instr *bary = nir_src_as_intrinsic(load->src[0]);

      /* Anything not explicitly noperspective gets the perspective (RHW)
       * divide.
       */
      nir_def *interp = interpolate_at_offset(
         b, coefficients, bary->src[0].ssa,
         nir_intrinsic_interp_mode(bary) != INTERP_MODE_NOPERSPECTIVE);

      /* Interpolation happens at fp32; convert to the load's bit size */
      return nir_f2fN(b, interp, load->def.bit_size);
   }
}
142
143 static nir_def *
lower(nir_builder * b,nir_instr * instr,void * data)144 lower(nir_builder *b, nir_instr *instr, void *data)
145 {
146 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
147
148 /* Each component is loaded separated */
149 nir_def *values[NIR_MAX_VEC_COMPONENTS] = {NULL};
150 for (unsigned i = 0; i < intr->def.num_components; ++i) {
151 values[i] = interpolate_channel(b, intr, i);
152 }
153
154 return nir_vec(b, values, intr->def.num_components);
155 }
156
157 bool
agx_nir_lower_interpolation(nir_shader * s)158 agx_nir_lower_interpolation(nir_shader *s)
159 {
160 assert(s->info.stage == MESA_SHADER_FRAGMENT);
161
162 return nir_shader_lower_instructions(s, needs_lower, lower, NULL);
163 }
164