/*
 * Copyright (C) 2018-2020 Collabora, Ltd.
 * Copyright (C) 2019-2020 Icecream95
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "pan_ir.h"
#include "compiler/nir/nir_builder.h"

/* Midgard can write all of color, depth and stencil in a single writeout
 * operation, so we merge depth/stencil stores with color stores.
 * If there are no color stores, we add a write to the "depth RT".
 *
 * On Bifrost, we want these combined so we can properly order +ZS_EMIT with
 * respect to +ATEST and +BLEND, and so depth/stencil stores collapse into a
 * single +ZS_EMIT op.
 */

/*
 * Get the type to report for a piece of a combined store, given the store it
 * is combining from. If there is no store to render target #0, a dummy <0.0,
 * 0.0, 0.0, 0.0> write is used, so report a matching float32 type.
 */
static nir_alu_type
pan_nir_rt_store_type(nir_intrinsic_instr *store)
{
        return store ? nir_intrinsic_src_type(store) : nir_type_float32;
}

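/*
 * Emit a single store_combined_output_pan gathering the given colour store
 * (if any) together with whichever depth/stencil/dual-source stores were
 * found. "writeout" is a mask of PAN_WRITEOUT_* flags describing what the
 * combined store carries beyond colour.
 */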
static void
pan_nir_emit_combined_store(nir_builder *b,
                            nir_intrinsic_instr *rt0_store,
                            unsigned writeout,
                            nir_intrinsic_instr **stores)
{
        nir_intrinsic_instr *intr = nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_combined_output_pan);

        intr->num_components = rt0_store ? rt0_store->src[0].ssa->num_components : 4;

        if (rt0_store)
                nir_intrinsic_set_base(intr, nir_intrinsic_base(rt0_store));
        nir_intrinsic_set_src_type(intr, pan_nir_rt_store_type(rt0_store));
        nir_intrinsic_set_dest_type(intr, pan_nir_rt_store_type(stores[2]));
        nir_intrinsic_set_component(intr, writeout);

        nir_ssa_def *zero = nir_imm_int(b, 0);
        nir_ssa_def *zero4 = nir_imm_ivec4(b, 0, 0, 0, 0);

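        /* Sources are { colour value, output offset, depth, stencil,
         * dual-source colour }; absent inputs fall back to zero immediates
         * of the matching width. */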
        nir_ssa_def *src[] = {
                rt0_store ? rt0_store->src[0].ssa : zero4,
                rt0_store ? rt0_store->src[1].ssa : zero,
                stores[0] ? stores[0]->src[0].ssa : zero,
                stores[1] ? stores[1]->src[0].ssa : zero,
                stores[2] ? stores[2]->src[0].ssa : zero4,
        };

        for (int i = 0; i < ARRAY_SIZE(src); ++i)
                intr->src[i] = nir_src_for_ssa(src[i]);

        nir_builder_instr_insert(b, &intr->instr);
}

bool
pan_nir_lower_zs_store(nir_shader *nir)
{
        if (nir->info.stage != MESA_SHADER_FRAGMENT)
                return false;

        nir_variable *vars[3] = { NULL };

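        /* Collect the depth, stencil and dual-source (data.index != 0)
         * outputs, if any. */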
        nir_foreach_shader_out_variable(var, nir) {
                if (var->data.location == FRAG_RESULT_DEPTH)
                        vars[0] = var;
                else if (var->data.location == FRAG_RESULT_STENCIL)
                        vars[1] = var;
                else if (var->data.index)
                        vars[2] = var;
        }

        if (!vars[0] && !vars[1] && !vars[2])
                return false;

        bool progress = false;

        nir_foreach_function(function, nir) {
                if (!function->impl) continue;

                nir_intrinsic_instr *stores[3] = { NULL };

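                /* Find the store_output intrinsics writing those variables
                 * in this function. */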
                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
                                if (intr->intrinsic != nir_intrinsic_store_output)
                                        continue;

                                for (unsigned i = 0; i < ARRAY_SIZE(vars); ++i) {
                                        if (vars[i] && nir_intrinsic_base(intr) == vars[i]->data.driver_location) {
                                                assert(!stores[i]);
                                                stores[i] = intr;
                                        }
                                }
                        }
                }

                if (!stores[0] && !stores[1] && !stores[2]) continue;

                nir_block *common_block = NULL;

                /* Ensure all stores are in the same block */
                for (unsigned i = 0; i < ARRAY_SIZE(stores); ++i) {
                        if (!stores[i])
                                continue;

                        nir_block *block = stores[i]->instr.block;

                        if (common_block)
                                assert(common_block == block);
                        else
                                common_block = block;
                }

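                /* Record which non-colour channels (depth, stencil,
                 * dual-source) the combined writeout must carry. */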
                unsigned writeout = 0;
                if (stores[0])
                        writeout |= PAN_WRITEOUT_Z;
                if (stores[1])
                        writeout |= PAN_WRITEOUT_S;
                if (stores[2])
                        writeout |= PAN_WRITEOUT_2;

                bool replaced = false;

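                /* Replace each colour render-target store with a combined
                 * store; only the first rewrite also carries the
                 * depth/stencil/dual-source channels. */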
                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
                                if (intr->intrinsic != nir_intrinsic_store_output)
                                        continue;

                                const nir_variable *var = nir_find_variable_with_driver_location(nir, nir_var_shader_out, nir_intrinsic_base(intr));
                                assert(var);

                                if (var->data.location < FRAG_RESULT_DATA0)
                                        continue;

                                if (var->data.index)
                                        continue;

                                assert(nir_src_is_const(intr->src[1]) && "no indirect outputs");

                                nir_builder b;
                                nir_builder_init(&b, function->impl);
                                b.cursor = nir_after_block_before_jump(instr->block);

                                /* Trying to write depth twice results in the
                                 * wrong blend shader being executed on
                                 * Midgard */
                                unsigned this_store = PAN_WRITEOUT_C | (replaced ? 0 : writeout);

                                pan_nir_emit_combined_store(&b, intr, this_store, stores);

                                nir_instr_remove(instr);

                                replaced = true;
                        }
                }

                /* Insert a store to the depth RT (0xff) if needed */
                if (!replaced) {
                        nir_builder b;
                        nir_builder_init(&b, function->impl);
                        b.cursor = nir_after_block_before_jump(common_block);

                        pan_nir_emit_combined_store(&b, NULL, writeout, stores);
                }

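                /* The depth/stencil/dual-source stores are folded into the
                 * combined op now, so remove the originals. */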
                for (unsigned i = 0; i < ARRAY_SIZE(stores); ++i) {
                        if (stores[i])
                                nir_instr_remove(&stores[i]->instr);
                }

                nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
                progress = true;
        }

        return progress;
}