1 /*
2 * Copyright (c) 2019 Zodiac Inflight Innovations
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jonathan Marek <jonathan@marek.ca>
25 */
26
27 #include "etnaviv_nir.h"
28
29 static inline int
color_index_for_location(unsigned location)30 color_index_for_location(unsigned location)
31 {
32 assert(location != FRAG_RESULT_COLOR &&
33 "gl_FragColor must be lowered before nir_lower_blend");
34
35 if (location < FRAG_RESULT_DATA0)
36 return -1;
37 else
38 return location - FRAG_RESULT_DATA0;
39 }
40
41 /* io related lowering
42 * run after lower_int_to_float because it adds i2f/f2i ops
43 */
/* io related lowering
 * run after lower_int_to_float because it adds i2f/f2i ops
 *
 * Performs several etnaviv-specific IO fixups in one pass over the shader:
 *  - load_front_face: HW reports front-facing as 0.0/1.0, so the boolean
 *    load is replaced by a compare-with-zero (inverted for front_ccw keys).
 *  - store_deref of color outputs: swizzles red/blue when the variant key
 *    requests an RB swap for that render target.
 *  - load_vertex_id/load_instance_id: records the register where the id
 *    lands (v->vs_id_in_reg) for later plumbing.
 *  - texture ops: on pre-HALTI5 hardware, packs bias/lod into the unused
 *    components of the coordinate source, since those parts only take a
 *    single combined source.
 */
void
etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
{
   nir_foreach_function_impl(impl, shader) {
      nir_builder b = nir_builder_create(impl);

      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type == nir_instr_type_intrinsic) {
               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

               switch (intr->intrinsic) {
               case nir_intrinsic_load_front_face: {
                  /* HW front_face is 0.0/1.0, not 0/~0u for bool
                   * lower with a comparison with 0
                   */
                  intr->def.bit_size = 32;

                  b.cursor = nir_after_instr(instr);

                  nir_def *ssa = nir_ine_imm(&b, &intr->def, 0);
                  /* front_ccw flips the winding convention, so invert the
                   * comparison (ine -> ieq) in that case.
                   */
                  if (v->key.front_ccw)
                     nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;

                  /* Redirect all other users of the raw load to the
                   * comparison result; "after" keeps the compare itself
                   * reading the original load.
                   */
                  nir_def_rewrite_uses_after(&intr->def,
                                             ssa,
                                             ssa->parent_instr);
               } break;
               case nir_intrinsic_store_deref: {
                  nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
                  /* RB swap only applies to fragment color outputs. */
                  if (shader->info.stage != MESA_SHADER_FRAGMENT || !v->key.frag_rb_swap)
                     break;

                  assert(deref->deref_type == nir_deref_type_var);

                  int rt = color_index_for_location(deref->var->data.location);
                  if (rt == -1)
                     break;

                  /* frag_rb_swap is a per-render-target bitmask. */
                  if (!(v->key.frag_rb_swap & (1 << rt)))
                     break;

                  b.cursor = nir_before_instr(instr);

                  /* Insert a mov that swaps components 0 and 2 (red/blue)
                   * of the stored value.
                   */
                  nir_def *ssa = nir_mov(&b, intr->src[1].ssa);
                  nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
                  alu->src[0].swizzle[0] = 2;
                  alu->src[0].swizzle[2] = 0;
                  nir_src_rewrite(&intr->src[1], ssa);
               } break;
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_instance_id:
                  /* detect use of vertex_id/instance_id */
                  v->vs_id_in_reg = v->infile.num_reg;
                  break;
               default:
                  break;
               }
            }

            if (instr->type != nir_instr_type_tex)
               continue;

            nir_tex_instr *tex = nir_instr_as_tex(instr);
            nir_src *coord = NULL;
            nir_src *src1 = NULL;
            unsigned src1_idx;

            /* This pass assumes sampler and texture indices match
             * (no separate sampler objects on this hardware path).
             */
            assert(tex->sampler_index == tex->texture_index);

            /* Find the coordinate source and the (at most one) extra
             * bias/lod source that needs folding into it.
             */
            for (unsigned i = 0; i < tex->num_srcs; i++) {
               switch (tex->src[i].src_type) {
               case nir_tex_src_coord:
                  coord = &tex->src[i].src;
                  break;
               case nir_tex_src_bias:
               case nir_tex_src_lod:
                  /* bias and lod are mutually exclusive on a tex instr */
                  assert(!src1);
                  src1 = &tex->src[i].src;
                  src1_idx = i;
                  break;
               case nir_tex_src_ddx:
               case nir_tex_src_ddy:
               case nir_tex_src_comparator:
                  break;
               default:
                  assert(0);
                  break;
               }
            }

            /* pre HALTI5 needs texture sources in a single source */

            if (!src1 || v->shader->info->halti >= 5)
               continue;

            /* There must be a free coord component to hold the bias/lod. */
            assert(coord && src1 && tex->coord_components < 4);

            /* Build a vec4: coord components first, then the bias/lod
             * value replicated into the remaining components.
             */
            nir_alu_instr *vec = nir_alu_instr_create(shader, nir_op_vec4);
            for (unsigned i = 0; i < tex->coord_components; i++) {
               vec->src[i].src = nir_src_for_ssa(coord->ssa);
               vec->src[i].swizzle[0] = i;
            }
            for (unsigned i = tex->coord_components; i < 4; i++)
               vec->src[i].src = nir_src_for_ssa(src1->ssa);

            nir_def_init(&vec->instr, &vec->def, 4, 32);

            /* Drop the standalone bias/lod source and point the coord at
             * the packed vec4 instead.
             */
            nir_tex_instr_remove_src(tex, src1_idx);
            nir_src_rewrite(coord, &vec->def);
            tex->coord_components = 4;

            nir_instr_insert_before(&tex->instr, &vec->instr);
         }
      }
   }
}
161
/* Per-impl ALU lowering for etnaviv transcendental quirks:
 *  - scales sin/cos inputs by the constant the hardware expects
 *    (1/pi with the new-style transcendental unit, 2/pi otherwise),
 *  - on hardware with new transcendentals, widens div/log2/sin/cos results
 *    to two components and multiplies them together to form the final
 *    scalar value (the unit produces the result split across .x and .y).
 */
static void
etna_lower_alu_impl(nir_function_impl *impl, bool has_new_transcendentals)
{
   nir_shader *shader = impl->function->shader;

   nir_builder b = nir_builder_create(impl);

   /* in a separate loop so we can apply the multiple-uniform logic to the new fmul */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_alu)
            continue;

         nir_alu_instr *alu = nir_instr_as_alu(instr);
         /* multiply sin/cos src by constant
          * TODO: do this earlier (but it breaks const_prop opt)
          */
         if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
            b.cursor = nir_before_instr(instr);

            nir_def *imm = has_new_transcendentals ?
               nir_imm_float(&b, 1.0 / M_PI) :
               nir_imm_float(&b, 2.0 / M_PI);

            nir_src_rewrite(&alu->src[0].src,
                            nir_fmul(&b, alu->src[0].src.ssa, imm));
         }

         /* change transcendental ops to vec2 and insert vec1 mul for the result
          * TODO: do this earlier (but it breaks with optimizations)
          */
         if (has_new_transcendentals && (
             alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
             alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
            nir_def *ssa = &alu->def;

            /* only scalar transcendentals are expected here */
            assert(ssa->num_components == 1);

            /* mul = ssa.x * ssa.y: combine the two result components into
             * the final scalar value.
             */
            nir_alu_instr *mul = nir_alu_instr_create(shader, nir_op_fmul);
            mul->src[0].src = mul->src[1].src = nir_src_for_ssa(ssa);
            mul->src[1].swizzle[0] = 1;

            nir_def_init(&mul->instr, &mul->def, 1, 32);

            /* widen the op's result to vec2 in place; source swizzle for
             * the second lane reuses component 0.
             */
            alu->src[0].swizzle[1] = 0;
            ssa->num_components = 2;

            nir_instr_insert_after(instr, &mul->instr);

            /* all original users now read the combined mul result; "after"
             * keeps the mul itself reading the widened op.
             */
            nir_def_rewrite_uses_after(ssa, &mul->def,
                                       &mul->instr);
         }
      }
   }
}
217
218 void
etna_lower_alu(nir_shader * shader,bool has_new_transcendentals)219 etna_lower_alu(nir_shader *shader, bool has_new_transcendentals)
220 {
221 nir_foreach_function_impl(impl, shader) {
222 etna_lower_alu_impl(impl, has_new_transcendentals);
223 }
224 }
225